// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"

/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress
 *
 * Should be called with balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return the reduced profile in chunk format.  If a profile change is in
 * progress (either running or paused), pick the target profile (if it's
 * already available), otherwise fall back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress; if so, try to
	 * reduce to the target profile.
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		spin_unlock(&fs_info->balance_lock);
		return extended_to_chunk(target);
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}

u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}

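/* Take an extra reference on a block group. */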
void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	refcount_inc(&cache->refs);
}

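/*
 * Drop a reference on a block group.  When the last reference is dropped the
 * block group must no longer have pinned or reserved bytes and is freed here.
 */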
void btrfs_put_block_group(struct btrfs_block_group *cache)
{
	if (refcount_dec_and_test(&cache->refs)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);

		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		/*
		 * If the tree is not empty, someone is still holding the mutex
		 * of a full_stripe_lock, which can only be released by the
		 * caller, and that will cause a use-after-free when the caller
		 * tries to release the full stripe lock.
		 *
		 * There is no better way to resolve this, so just warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * This adds the block group to the fs_info rb tree for the block group cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;

	ASSERT(block_group->length != 0);

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->start)
		info->first_logical_byte = block_group->start;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->start)
			info->first_logical_byte = ret->start;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}

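/*
 * Return the block group that follows @cache in the block group cache tree,
 * dropping the reference on @cache and taking one on the returned group, or
 * NULL if @cache was the last one.  If @cache was removed from the tree in
 * the meantime, fall back to a fresh lookup starting at its end offset.
 */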
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	spin_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		spin_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&fs_info->block_group_cache_lock);
	return cache;
}

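/*
 * Record a NOCOW writer against the block group containing @bytenr.  Returns
 * false if the block group is read-only; otherwise the nocow_writers counter
 * is bumped and the reference on the block group is kept until the matching
 * btrfs_dec_nocow_writers() call.
 */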
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool ret = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return false;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	/* No put on block group, done by btrfs_dec_nocow_writers */
	if (!ret)
		btrfs_put_block_group(bg);

	return ret;
}

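/*
 * Drop the NOCOW writer count taken by btrfs_inc_nocow_writers() and wake up
 * anybody waiting in btrfs_wait_nocow_writers() once it reaches zero.
 */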
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);
	/*
	 * Once for our lookup and once for the lookup done by a previous call
	 * to btrfs_inc_nocow_writers()
	 */
	btrfs_put_block_group(bg);
	btrfs_put_block_group(bg);
}

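/* Wait until all NOCOW writers against this block group have finished. */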
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}

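/*
 * Drop one reservation count on the block group containing @start and wake up
 * waiters (see btrfs_wait_block_group_reservations()) when it hits zero.
 */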
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but has
	 * not yet created a corresponding ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}

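/*
 * Return the caching control of a block group with an extra reference taken,
 * or NULL if the block group is not being cached.  The caller must drop the
 * reference with btrfs_put_caching_control().
 */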
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once.  So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes.  Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	btrfs_put_caching_control(caching_ctl);
}

int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
	btrfs_put_caching_control(caching_ctl);
	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
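/*
 * Debug helper: remove every other chunk of free space from the block group so
 * that its free space ends up heavily fragmented.  Only used when free space
 * fragmentation is enabled for testing (see btrfs_should_fragment_free_space()).
 */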
static void fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->start;
	u64 len = block_group->length;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

/*
 * This is only called by btrfs_cache_block_group().  Since we could have freed
 * extents, we need to check the pinned_extents for anything that can't be used
 * yet, because its free space will only be released when the transaction
 * commits.
 */
u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(&info->excluded_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}

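/*
 * Slow caching path: walk the extent tree in the commit root and treat every
 * gap between allocated extents inside the block group as free space.  The
 * commit_root_sem and the caching mutex are periodically dropped so that
 * transaction commits and waiters are not blocked for too long.
 */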
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->start) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->start + block_group->length)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, last,
					  block_group->start + block_group->length);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}

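/*
 * Worker that fills in a block group's free space cache, either from the free
 * space tree (when present and trusted) or by scanning the extent tree, then
 * marks the block group as cached and wakes up anybody waiting on it.
 */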
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	/*
	 * If we are in the transaction that populated the free space tree we
	 * can't actually cache from the free space tree as our commit root and
	 * real root are the same, so we could change the contents of the blocks
	 * while caching.  Instead do the slow caching in this case, and after
	 * the transaction has committed we will be safe.
	 */
	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
	    !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->length - block_group->used;
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	btrfs_free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	btrfs_put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

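/*
 * Start caching the free space of a block group.  If the on-disk free space
 * cache (SPACE_CACHE) can be loaded synchronously we are done right away;
 * otherwise a caching_thread work item is queued to fill it in the background.
 * When @load_cache_only is set, only the synchronous fast path is attempted.
 */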
int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->start;
	refcount_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but it could happen when one thread
	 * starts to load the space cache info and then some other thread
	 * starts a transaction commit which tries to do an allocation while
	 * the first thread is still loading the space cache info.  The
	 * previous loop should have kept us from choosing this block group,
	 * but if we've moved to the state where we will wait on caching block
	 * groups we need to first check if we're doing a fast load here, so we
	 * can wait for it to finish, otherwise we could end up allocating from
	 * a block group whose cache gets evicted for one reason or another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		refcount_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		btrfs_put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(cache)) {
			u64 bytes_used;

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->length - cache->used;
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			btrfs_put_caching_control(caching_ctl);
			btrfs_free_excluded_extents(cache);
			return 0;
		}
	} else {
		/*
		 * We're either using the free space tree or no caching at all.
		 * Set cached to the appropriate value and wake up any waiters.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) spin_lock(&cache->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (load_cache_only) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) cache->caching_ctl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) cache->cached = BTRFS_CACHE_NO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) cache->cached = BTRFS_CACHE_STARTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) cache->has_caching_ctl = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) spin_unlock(&cache->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) wake_up(&caching_ctl->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (load_cache_only) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) btrfs_put_caching_control(caching_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) down_write(&fs_info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) refcount_inc(&caching_ctl->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) up_write(&fs_info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) btrfs_get_block_group(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
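/*
* Clear the extended profile bits in @flags from the per-type available
* allocation bits. Called when the last block group using a given
* profile is removed.
*/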
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) u64 extra_flags = chunk_to_extended(flags) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) BTRFS_EXTENDED_PROFILE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) write_seqlock(&fs_info->profiles_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (flags & BTRFS_BLOCK_GROUP_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) fs_info->avail_data_alloc_bits &= ~extra_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (flags & BTRFS_BLOCK_GROUP_METADATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) fs_info->avail_metadata_alloc_bits &= ~extra_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) fs_info->avail_system_alloc_bits &= ~extra_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) write_sequnlock(&fs_info->profiles_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * Clear incompat bits for the following feature(s):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * in the whole filesystem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) bool found_raid56 = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) bool found_raid1c34 = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) struct list_head *head = &fs_info->space_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) struct btrfs_space_info *sinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) list_for_each_entry_rcu(sinfo, head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) down_read(&sinfo->groups_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) found_raid56 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) found_raid56 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) found_raid1c34 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) found_raid1c34 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) up_read(&sinfo->groups_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (!found_raid56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) btrfs_clear_fs_incompat(fs_info, RAID56);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (!found_raid1c34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) btrfs_clear_fs_incompat(fs_info, RAID1C34);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
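/* Delete the block group item for @block_group from the extent root. */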
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) static int remove_block_group_item(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) struct btrfs_block_group *block_group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct btrfs_fs_info *fs_info = trans->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) struct btrfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) root = fs_info->extent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) key.objectid = block_group->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) key.offset = block_group->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) ret = btrfs_del_item(trans, root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) u64 group_start, struct extent_map *em)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct btrfs_fs_info *fs_info = trans->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) struct btrfs_block_group *block_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct btrfs_free_cluster *cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct btrfs_root *tree_root = fs_info->tree_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct kobject *kobj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) int factor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) struct btrfs_caching_control *caching_ctl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) bool remove_em;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) bool remove_rsv = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) block_group = btrfs_lookup_block_group(fs_info, group_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) BUG_ON(!block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) BUG_ON(!block_group->ro);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) trace_btrfs_remove_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /*
* Free the reserved super bytes from this block group before
* removing it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) btrfs_free_excluded_extents(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) btrfs_free_ref_tree_range(fs_info, block_group->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) block_group->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) index = btrfs_bg_flags_to_raid_index(block_group->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) factor = btrfs_bg_type_to_factor(block_group->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /* make sure this block group isn't part of an allocation cluster */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) cluster = &fs_info->data_alloc_cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) spin_lock(&cluster->refill_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) btrfs_return_cluster_to_free_space(block_group, cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) spin_unlock(&cluster->refill_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * make sure this block group isn't part of a metadata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * allocation cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) cluster = &fs_info->meta_alloc_cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) spin_lock(&cluster->refill_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) btrfs_return_cluster_to_free_space(block_group, cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) spin_unlock(&cluster->refill_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * get the inode first so any iput calls done for the io_list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * aren't the final iput (no unlinks allowed now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) inode = lookup_free_space_inode(block_group, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) mutex_lock(&trans->transaction->cache_write_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * Make sure our free space cache IO is done before removing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * free space inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) spin_lock(&trans->transaction->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (!list_empty(&block_group->io_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) list_del_init(&block_group->io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) spin_unlock(&trans->transaction->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) btrfs_wait_cache_io(trans, block_group, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) btrfs_put_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) spin_lock(&trans->transaction->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (!list_empty(&block_group->dirty_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) list_del_init(&block_group->dirty_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) remove_rsv = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) btrfs_put_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) spin_unlock(&trans->transaction->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) mutex_unlock(&trans->transaction->cache_write_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (!IS_ERR(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) ret = btrfs_orphan_add(trans, BTRFS_I(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) btrfs_add_delayed_iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) clear_nlink(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /* One for the block groups ref */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) spin_lock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (block_group->iref) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) block_group->iref = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) block_group->inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) spin_unlock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) spin_unlock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) /* One for our lookup ref */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) btrfs_add_delayed_iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
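/*
* Delete the free space cache's item for this block group (objectid
* BTRFS_FREE_SPACE_OBJECTID, offset == block group start) from the
* tree of tree roots, if it exists.
*/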
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) key.objectid = BTRFS_FREE_SPACE_OBJECTID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) key.type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) key.offset = block_group->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) ret = btrfs_del_item(trans, tree_root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
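/* Remove the block group from the in-memory block group cache rbtree. */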
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) spin_lock(&fs_info->block_group_cache_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) rb_erase(&block_group->cache_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) &fs_info->block_group_cache_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) RB_CLEAR_NODE(&block_group->cache_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) /* Once for the block groups rbtree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) btrfs_put_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (fs_info->first_logical_byte == block_group->start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) fs_info->first_logical_byte = (u64)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) spin_unlock(&fs_info->block_group_cache_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
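/*
* Unlink the block group from its space_info's per-profile list. If it
* was the last block group with this profile, also drop the
* corresponding sysfs kobject and clear the available allocation bits.
*/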
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) down_write(&block_group->space_info->groups_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) /*
* We must use list_del_init so others can check whether they are
* still on the list after taking the semaphore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) list_del_init(&block_group->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (list_empty(&block_group->space_info->block_groups[index])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) kobj = block_group->space_info->block_group_kobjs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) block_group->space_info->block_group_kobjs[index] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) clear_avail_alloc_bits(fs_info, block_group->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) up_write(&block_group->space_info->groups_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) clear_incompat_bg_bits(fs_info, block_group->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (kobj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) kobject_del(kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) kobject_put(kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
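/*
* Wait for any in-progress caching of this block group to finish and
* drop its caching control, including the reference held via the
* fs_info->caching_block_groups list.
*/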
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (block_group->has_caching_ctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) caching_ctl = btrfs_get_caching_control(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (block_group->cached == BTRFS_CACHE_STARTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) btrfs_wait_block_group_cache_done(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (block_group->has_caching_ctl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) down_write(&fs_info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (!caching_ctl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct btrfs_caching_control *ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) list_for_each_entry(ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) &fs_info->caching_block_groups, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (ctl->block_group == block_group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) caching_ctl = ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) refcount_inc(&caching_ctl->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (caching_ctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) list_del_init(&caching_ctl->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) up_write(&fs_info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (caching_ctl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) /* Once for the caching bgs list and once for us. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) btrfs_put_caching_control(caching_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) btrfs_put_caching_control(caching_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
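/*
* By this point the block group must no longer be on the transaction's
* dirty or io lists.
*/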
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) spin_lock(&trans->transaction->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) WARN_ON(!list_empty(&block_group->dirty_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) WARN_ON(!list_empty(&block_group->io_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) spin_unlock(&trans->transaction->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) btrfs_remove_free_space_cache(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
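/*
* Remove the block group from its space_info's counters (it was made
* read-only before removal).
*/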
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) spin_lock(&block_group->space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) list_del_init(&block_group->ro_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) WARN_ON(block_group->space_info->total_bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) < block_group->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) WARN_ON(block_group->space_info->bytes_readonly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) < block_group->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) WARN_ON(block_group->space_info->disk_total
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) < block_group->length * factor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) block_group->space_info->total_bytes -= block_group->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) block_group->space_info->bytes_readonly -= block_group->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) block_group->space_info->disk_total -= block_group->length * factor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) spin_unlock(&block_group->space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * Remove the free space for the block group from the free space tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * and the block group's item from the extent tree before marking the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * block group as removed. This is to prevent races with tasks that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * freeze and unfreeze a block group, this task and another task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * allocating a new block group - the unfreeze task ends up removing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * the block group's extent map before the task calling this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * deletes the block group item from the extent tree, allowing for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * another task to attempt to create another block group with the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * item key (and failing with -EEXIST and a transaction abort).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) ret = remove_block_group_free_space(trans, block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) ret = remove_block_group_item(trans, path, block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) spin_lock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) block_group->removed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /*
* At this point trimming or scrub can't start on this block group,
* because we removed the block group from the rbtree
* fs_info->block_group_cache_tree so no one can find it anymore. And
* even if someone already got this block group before we removed it
* from the rbtree, they have already incremented block_group->frozen -
* if they didn't, for the trimming case they won't find any free space
* entries because we already removed them all when we called
* btrfs_remove_free_space_cache().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * And we must not remove the extent map from the fs_info->mapping_tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * to prevent the same logical address range and physical device space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * ranges from being reused for a new block group. This is needed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * avoid races with trimming and scrub.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * completely transactionless, so while it is trimming a range the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * currently running transaction might finish and a new one start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * allowing for new block groups to be created that can reuse the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * physical device locations unless we take this special care.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * There may also be an implicit trim operation if the file system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * is mounted with -odiscard. The same protections must remain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * in place until the extents have been discarded completely when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * the transaction commit has completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) remove_em = (atomic_read(&block_group->frozen) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) spin_unlock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (remove_em) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) struct extent_map_tree *em_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) em_tree = &fs_info->mapping_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) write_lock(&em_tree->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) remove_extent_mapping(em_tree, em);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) write_unlock(&em_tree->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) /* once for the tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) free_extent_map(em);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) /* Once for the lookup reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) btrfs_put_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (remove_rsv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) btrfs_delayed_refs_rsv_release(fs_info, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
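/*
* Start a transaction with enough units reserved in the metadata space
* info to remove the chunk and block group located at @chunk_offset.
*/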
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct btrfs_fs_info *fs_info, const u64 chunk_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) struct extent_map_tree *em_tree = &fs_info->mapping_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) struct extent_map *em;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) struct map_lookup *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) unsigned int num_items;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) read_lock(&em_tree->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) em = lookup_extent_mapping(em_tree, chunk_offset, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) read_unlock(&em_tree->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) ASSERT(em && em->start == chunk_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * We need to reserve 3 + N units from the metadata space info in order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * to remove a block group (done at btrfs_remove_chunk() and at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * btrfs_remove_block_group()), which are used for:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * 1 unit for adding the free space inode's orphan (located in the tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * of tree roots).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * 1 unit for deleting the block group item (located in the extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * tree).
* 1 unit for deleting the free space item (located in the tree of tree
* roots).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * N units for deleting N device extent items corresponding to each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * stripe (located in the device tree).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * In order to remove a block group we also need to reserve units in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * system space info in order to update the chunk tree (update one or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * more device items and remove one chunk item), but this is done at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * btrfs_remove_chunk() through a call to check_system_chunk().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) map = em->map_lookup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) num_items = 3 + map->num_stripes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) free_extent_map(em);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) num_items);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /*
* Mark block group @cache read-only, so that later writes won't happen to
* block group @cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * If @force is not set, this function will only mark the block group readonly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * if we have enough free space (1M) in other metadata/system block groups.
* If @force is set, this function will mark the block group readonly
* without checking free space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * NOTE: This function doesn't care if other block groups can contain all the
* data in this block group. That check should be done by the relocation
* routine, not this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) struct btrfs_space_info *sinfo = cache->space_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) u64 num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) int ret = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) spin_lock(&sinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) spin_lock(&cache->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (cache->swap_extents) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) ret = -ETXTBSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (cache->ro) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) cache->ro++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) num_bytes = cache->length - cache->reserved - cache->pinned -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) cache->bytes_super - cache->used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /*
* Data never overcommits, even in mixed mode, so just do a straight
* check of the space left over against what we have allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (force) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) } else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) u64 sinfo_used = btrfs_space_info_used(sinfo, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) /*
* Here we make sure that if we mark this bg RO, we still have
* enough free space left as a buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (sinfo_used + num_bytes <= sinfo->total_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /*
* We overcommit metadata, so we need to do the
* btrfs_can_overcommit check here, and we need to pass in
* BTRFS_RESERVE_NO_FLUSH to give ourselves as much leeway as
* possible to mark this block group as read only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) BTRFS_RESERVE_NO_FLUSH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) sinfo->bytes_readonly += num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) cache->ro++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) spin_unlock(&cache->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) spin_unlock(&sinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) btrfs_info(cache->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) "unable to make block group %llu ro", cache->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
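/*
* Clear the block group's range from the pinned extents of the current
* transaction and, if one is still around, the previous transaction.
* Returns true on success and false if clearing the bits failed.
*/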
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) struct btrfs_block_group *bg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) struct btrfs_fs_info *fs_info = bg->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) struct btrfs_transaction *prev_trans = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) const u64 start = bg->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) const u64 end = start + bg->length - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
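/*
* If there is a previous transaction (possibly still committing), grab
* a reference to it so we can clear this range from its pinned extents
* as well.
*/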
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) spin_lock(&fs_info->trans_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (trans->transaction->list.prev != &fs_info->trans_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) prev_trans = list_last_entry(&trans->transaction->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) struct btrfs_transaction, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) refcount_inc(&prev_trans->use_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) spin_unlock(&fs_info->trans_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * Hold the unused_bg_unpin_mutex lock to avoid racing with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) * btrfs_finish_extent_commit(). If we are at transaction N, another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * task might be running finish_extent_commit() for the previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * transaction N - 1, and have seen a range belonging to the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * group in pinned_extents before we were able to clear the whole block
* group range from pinned_extents. This means that task can look up
* the block group after we unpinned it from pinned_extents and removed
* it, leading to a BUG_ON() at unpin_extent_range().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) mutex_lock(&fs_info->unused_bg_unpin_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (prev_trans) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) ret = clear_extent_bits(&prev_trans->pinned_extents, start, end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) EXTENT_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) EXTENT_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) mutex_unlock(&fs_info->unused_bg_unpin_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (prev_trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) btrfs_put_transaction(prev_trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return ret == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * Process the unused_bgs list and remove any that don't have any allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * space inside of them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) struct btrfs_block_group *block_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) struct btrfs_space_info *space_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) struct btrfs_trans_handle *trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) spin_lock(&fs_info->unused_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) while (!list_empty(&fs_info->unused_bgs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) int trimming;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) block_group = list_first_entry(&fs_info->unused_bgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct btrfs_block_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) bg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) list_del_init(&block_group->bg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) space_info = block_group->space_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (ret || btrfs_mixed_space_info(space_info)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) btrfs_put_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) spin_unlock(&fs_info->unused_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) mutex_lock(&fs_info->delete_unused_bgs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) /* Don't want to race with allocators so take the groups_sem */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) down_write(&space_info->groups_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * Async discard moves the final block group discard to be prior
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * to the unused_bgs code path. Therefore, if it's not fully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * trimmed, punt it back to the async discard lists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) if (btrfs_test_opt(fs_info, DISCARD_ASYNC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) !btrfs_is_free_space_trimmed(block_group)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) trace_btrfs_skip_unused_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) up_write(&space_info->groups_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) /* Requeue if we failed because of async discard */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) btrfs_discard_queue_work(&fs_info->discard_ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) spin_lock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (block_group->reserved || block_group->pinned ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) block_group->used || block_group->ro ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) list_is_singular(&block_group->list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * We want to bail if we made new allocations or have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * outstanding allocations in this block group. We do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * the ro check in case balance is currently acting on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * this block group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) trace_btrfs_skip_unused_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) spin_unlock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) up_write(&space_info->groups_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) spin_unlock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) /* We don't want to force the issue, only flip if it's ok. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) ret = inc_block_group_ro(block_group, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) up_write(&space_info->groups_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) * Want to do this before we do anything else so we can recover
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * properly if we fail to join the transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) trans = btrfs_start_trans_remove_block_group(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) block_group->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) if (IS_ERR(trans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) btrfs_dec_block_group_ro(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) ret = PTR_ERR(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) /*
* We could have pending pinned extents for this block group;
* just delete them, we don't care about them anymore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (!clean_pinned_extents(trans, block_group)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) btrfs_dec_block_group_ro(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) goto end_trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) * At this point, the block_group is read only and should fail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * new allocations. However, btrfs_finish_extent_commit() can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * cause this block_group to be placed back on the discard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * lists because now the block_group isn't fully discarded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) * Bail here and try again later after discarding everything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) spin_lock(&fs_info->discard_ctl.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) if (!list_empty(&block_group->discard_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) spin_unlock(&fs_info->discard_ctl.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) btrfs_dec_block_group_ro(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) btrfs_discard_queue_work(&fs_info->discard_ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) goto end_trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) spin_unlock(&fs_info->discard_ctl.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) /* Reset pinned so btrfs_put_block_group doesn't complain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) spin_lock(&space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) spin_lock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) btrfs_space_info_update_bytes_pinned(fs_info, space_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) -block_group->pinned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) space_info->bytes_readonly += block_group->pinned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) __btrfs_mod_total_bytes_pinned(space_info, -block_group->pinned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) block_group->pinned = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) spin_unlock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) spin_unlock(&space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) /*
* Normally an unused block group is passed here and trimming is then
* handled in the transaction commit path. Async discard interposes
* before this to do the trimming before coming down the unused block
* group path, as trimming will no longer be done later in the
* transaction commit path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) goto flip_async;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) /* DISCARD can flip during remount */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) trimming = btrfs_test_opt(fs_info, DISCARD_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) /* Implicit trim during transaction commit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (trimming)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) btrfs_freeze_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) /*
* btrfs_remove_chunk() will abort the transaction if things go
* horribly wrong.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) ret = btrfs_remove_chunk(trans, block_group->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (trimming)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) btrfs_unfreeze_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) goto end_trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) * If we're not mounted with -odiscard, we can just forget
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) * about this block group. Otherwise we'll need to wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) * until transaction commit to do the actual discard.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (trimming) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) spin_lock(&fs_info->unused_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) * A concurrent scrub might have added us to the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) * fs_info->unused_bgs, so use a list_move operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) * to add the block group to the deleted_bgs list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) list_move(&block_group->bg_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) &trans->transaction->deleted_bgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) spin_unlock(&fs_info->unused_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) btrfs_get_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) end_trans:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) mutex_unlock(&fs_info->delete_unused_bgs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) btrfs_put_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) spin_lock(&fs_info->unused_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) spin_unlock(&fs_info->unused_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) flip_async:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) mutex_unlock(&fs_info->delete_unused_bgs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) btrfs_put_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) btrfs_discard_punt_unused_bgs_list(fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
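/*
* Add @bg to the list of unused block groups so that
* btrfs_delete_unused_bgs() can later consider deleting it.
*/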
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) struct btrfs_fs_info *fs_info = bg->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) spin_lock(&fs_info->unused_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (list_empty(&bg->bg_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) btrfs_get_block_group(bg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) trace_btrfs_add_unused_block_group(bg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) spin_unlock(&fs_info->unused_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
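/*
* Read the block group item at @path and verify that its key and type
* flags match the chunk mapping found in the extent map tree.
*/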
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) struct btrfs_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) struct extent_map_tree *em_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct extent_map *em;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) struct btrfs_block_group_item bg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) u64 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) em_tree = &fs_info->mapping_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) read_lock(&em_tree->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) em = lookup_extent_mapping(em_tree, key->objectid, key->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) read_unlock(&em_tree->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (!em) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) "logical %llu len %llu found bg but no related chunk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) key->objectid, key->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (em->start != key->objectid || em->len != key->offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) "block group %llu len %llu mismatch with chunk %llu len %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) key->objectid, key->offset, em->start, em->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) ret = -EUCLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) goto out_free_em;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) sizeof(bg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) flags = btrfs_stack_block_group_flags(&bg) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) BTRFS_BLOCK_GROUP_TYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) key->objectid, key->offset, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) ret = -EUCLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) out_free_em:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) free_extent_map(em);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
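/*
 * Find the first block group item at or after @key->objectid.  Returns 0 when
 * one was found and validated (with @path pointing at the item), a positive
 * value when there are no more block group items, and a negative errno on
 * error.
 */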
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) static int find_first_block_group(struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) struct btrfs_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) struct btrfs_root *root = fs_info->extent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) struct btrfs_key found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (slot >= btrfs_header_nritems(leaf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) ret = btrfs_next_leaf(root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) btrfs_item_key_to_cpu(leaf, &found_key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (found_key.objectid >= key->objectid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) ret = read_bg_from_eb(fs_info, &found_key, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) path->slots[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) u64 extra_flags = chunk_to_extended(flags) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) BTRFS_EXTENDED_PROFILE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) write_seqlock(&fs_info->profiles_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) if (flags & BTRFS_BLOCK_GROUP_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) fs_info->avail_data_alloc_bits |= extra_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) if (flags & BTRFS_BLOCK_GROUP_METADATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) fs_info->avail_metadata_alloc_bits |= extra_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) fs_info->avail_system_alloc_bits |= extra_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) write_sequnlock(&fs_info->profiles_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) /**
 * btrfs_rmap_block - Map a physical disk address to a list of logical addresses
 * @fs_info:     the filesystem
 * @chunk_start: logical address of block group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) * @physical: physical address to map to logical addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) * @logical: return array of logical addresses which map to @physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) * @naddrs: length of @logical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) * @stripe_len: size of IO stripe for the given block group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) * Maps a particular @physical disk address to a list of @logical addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) * Used primarily to exclude those portions of a block group that contain super
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) * block copies.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) EXPORT_FOR_TESTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) u64 physical, u64 **logical, int *naddrs, int *stripe_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) struct extent_map *em;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) struct map_lookup *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) u64 *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) u64 bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) u64 data_stripe_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) u64 io_stripe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) int i, nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (IS_ERR(em))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) map = em->map_lookup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) data_stripe_length = em->orig_block_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) io_stripe_size = map->stripe_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) /* For RAID5/6 adjust to a full IO stripe length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) io_stripe_size = map->stripe_len * nr_data_stripes(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) for (i = 0; i < map->num_stripes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) bool already_inserted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) u64 stripe_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) if (!in_range(physical, map->stripes[i].physical,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) data_stripe_length))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) stripe_nr = physical - map->stripes[i].physical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) stripe_nr = div64_u64(stripe_nr, map->stripe_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) stripe_nr = stripe_nr * map->num_stripes + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) stripe_nr = div_u64(stripe_nr, map->sub_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) stripe_nr = stripe_nr * map->num_stripes + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) }
/*
 * The remaining case would be RAID56: multiply stripe_nr by
 * nr_data_stripes().  That adjustment is instead folded into
 * io_stripe_size (scaled to a full stripe above), which is used in
 * place of map->stripe_len when computing bytenr below.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
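		/*
		 * Purely illustrative example of the math above: for RAID0
		 * with two stripes and a 64K stripe_len, a physical address
		 * 128K into stripe i=1 gives stripe_nr = 2, then
		 * stripe_nr = 2 * 2 + 1 = 5, so the logical address is
		 * chunk_start + 5 * 64K.
		 */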
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) bytenr = chunk_start + stripe_nr * io_stripe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) /* Ensure we don't add duplicate addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) for (j = 0; j < nr; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) if (buf[j] == bytenr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) already_inserted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) if (!already_inserted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) buf[nr++] = bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) *logical = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) *naddrs = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) *stripe_len = io_stripe_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) free_extent_map(em);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) static int exclude_super_stripes(struct btrfs_block_group *cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) struct btrfs_fs_info *fs_info = cache->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) u64 bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) u64 *logical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) int stripe_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) int i, nr, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) cache->bytes_super += stripe_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) ret = btrfs_add_excluded_extent(fs_info, cache->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) stripe_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
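	/*
	 * Superblock copies live at fixed device offsets returned by
	 * btrfs_sb_offset() (64KiB, 64MiB and 256GiB on current formats).
	 * Map each mirror's physical offset back to the logical addresses it
	 * occupies inside this block group and exclude those ranges.
	 */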
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) bytenr = btrfs_sb_offset(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) ret = btrfs_rmap_block(fs_info, cache->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) bytenr, &logical, &nr, &stripe_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) while (nr--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) u64 len = min_t(u64, stripe_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) cache->start + cache->length - logical[nr]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) cache->bytes_super += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) ret = btrfs_add_excluded_extent(fs_info, logical[nr],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) kfree(logical);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) kfree(logical);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) static void link_block_group(struct btrfs_block_group *cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) struct btrfs_space_info *space_info = cache->space_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) int index = btrfs_bg_flags_to_raid_index(cache->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) down_write(&space_info->groups_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) list_add_tail(&cache->list, &space_info->block_groups[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) up_write(&space_info->groups_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) static struct btrfs_block_group *btrfs_create_block_group_cache(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) struct btrfs_fs_info *fs_info, u64 start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) struct btrfs_block_group *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) cache = kzalloc(sizeof(*cache), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (!cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) if (!cache->free_space_ctl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) kfree(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) cache->start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) cache->fs_info = fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) refcount_set(&cache->refs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) spin_lock_init(&cache->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) init_rwsem(&cache->data_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) INIT_LIST_HEAD(&cache->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) INIT_LIST_HEAD(&cache->cluster_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) INIT_LIST_HEAD(&cache->bg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) INIT_LIST_HEAD(&cache->ro_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) INIT_LIST_HEAD(&cache->discard_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) INIT_LIST_HEAD(&cache->dirty_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) INIT_LIST_HEAD(&cache->io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) btrfs_init_free_space_ctl(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) atomic_set(&cache->frozen, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) mutex_init(&cache->free_space_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) return cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) * Iterate all chunks and verify that each of them has the corresponding block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) * group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) struct extent_map_tree *map_tree = &fs_info->mapping_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) struct extent_map *em;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) struct btrfs_block_group *bg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) u64 start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) read_lock(&map_tree->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) * lookup_extent_mapping will return the first extent map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) * intersecting the range, so setting @len to 1 is enough to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) * get the first chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) em = lookup_extent_mapping(map_tree, start, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) read_unlock(&map_tree->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) if (!em)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) bg = btrfs_lookup_block_group(fs_info, em->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (!bg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) "chunk start=%llu len=%llu doesn't have corresponding block group",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) em->start, em->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) ret = -EUCLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) free_extent_map(em);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (bg->start != em->start || bg->length != em->len ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) em->start, em->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) bg->start, bg->length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) ret = -EUCLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) free_extent_map(em);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) btrfs_put_block_group(bg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) start = em->start + em->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) free_extent_map(em);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) btrfs_put_block_group(bg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) static void read_block_group_item(struct btrfs_block_group *cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) const struct btrfs_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) struct extent_buffer *leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) struct btrfs_block_group_item bgi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) int slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) cache->length = key->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) sizeof(bgi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) cache->used = btrfs_stack_block_group_used(&bgi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) cache->flags = btrfs_stack_block_group_flags(&bgi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) static int read_one_block_group(struct btrfs_fs_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) const struct btrfs_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) int need_clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) struct btrfs_block_group *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) struct btrfs_space_info *space_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) cache = btrfs_create_block_group_cache(info, key->objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (!cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) read_block_group_item(cache, path, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) set_free_space_tree_thresholds(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (need_clear) {
/*
 * When we mount with an old space cache, we need to
 * set BTRFS_DC_CLEAR and set the dirty flag.
 *
 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
 *    truncate the old free space cache inode and
 *    set up a new one.
 * b) Setting the 'dirty flag' makes sure that we flush
 *    the new space cache info onto disk.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) if (btrfs_test_opt(info, SPACE_CACHE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) cache->disk_cache_state = BTRFS_DC_CLEAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) btrfs_err(info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) cache->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) * We need to exclude the super stripes now so that the space info has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) * super bytes accounted for, otherwise we'll think we have more space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) * than we actually do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) ret = exclude_super_stripes(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) /* We may have excluded something, so call this just in case. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) btrfs_free_excluded_extents(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
/*
 * Check for two cases: either we are full, and therefore don't need
 * to bother with the caching work since we won't find any space, or we
 * are empty, and we can just add all the space in and be done with it.
 * This saves us _a_lot_ of time, particularly in the full case.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) if (cache->length == cache->used) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) cache->last_byte_to_unpin = (u64)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) cache->cached = BTRFS_CACHE_FINISHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) btrfs_free_excluded_extents(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) } else if (cache->used == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) cache->last_byte_to_unpin = (u64)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) cache->cached = BTRFS_CACHE_FINISHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) add_new_free_space(cache, cache->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) cache->start + cache->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) btrfs_free_excluded_extents(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) ret = btrfs_add_block_group_cache(info, cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) btrfs_remove_free_space_cache(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) trace_btrfs_add_block_group(info, cache, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) btrfs_update_space_info(info, cache->flags, cache->length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) cache->used, cache->bytes_super, &space_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) cache->space_info = space_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) link_block_group(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) set_avail_alloc_bits(info, cache->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) if (btrfs_chunk_readonly(info, cache->start)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) inc_block_group_ro(cache, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) } else if (cache->used == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) ASSERT(list_empty(&cache->bg_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) if (btrfs_test_opt(info, DISCARD_ASYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) btrfs_discard_queue_work(&info->discard_ctl, cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) btrfs_mark_bg_unused(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) btrfs_put_block_group(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) int btrfs_read_block_groups(struct btrfs_fs_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) struct btrfs_block_group *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) struct btrfs_space_info *space_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) int need_clear = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) u64 cache_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) key.objectid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) cache_gen = btrfs_super_cache_generation(info->super_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) if (btrfs_test_opt(info, SPACE_CACHE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) btrfs_super_generation(info->super_copy) != cache_gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) need_clear = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) if (btrfs_test_opt(info, CLEAR_CACHE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) need_clear = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) ret = find_first_block_group(info, path, &key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) ret = read_one_block_group(info, path, &key, need_clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) goto error;
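		/*
		 * Block group items are keyed by (start, BLOCK_GROUP_ITEM,
		 * length), so advancing objectid by offset continues the
		 * search right after the block group we just read.
		 */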
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) key.objectid += key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) list_for_each_entry(space_info, &info->space_info, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) if (list_empty(&space_info->block_groups[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) cache = list_first_entry(&space_info->block_groups[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) struct btrfs_block_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) btrfs_sysfs_add_block_group_type(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) if (!(btrfs_get_alloc_profile(info, space_info->flags) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) (BTRFS_BLOCK_GROUP_RAID10 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) BTRFS_BLOCK_GROUP_RAID1_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) BTRFS_BLOCK_GROUP_RAID56_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) BTRFS_BLOCK_GROUP_DUP)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) /*
 * Avoid allocating from un-mirrored block groups if there are
 * mirrored block groups.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) list_for_each_entry(cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) &space_info->block_groups[BTRFS_RAID_RAID0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) inc_block_group_ro(cache, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) list_for_each_entry(cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) &space_info->block_groups[BTRFS_RAID_SINGLE],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) inc_block_group_ro(cache, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) btrfs_init_global_block_rsv(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) ret = check_chunk_block_group_mappings(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) static int insert_block_group_item(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) struct btrfs_block_group *block_group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) struct btrfs_fs_info *fs_info = trans->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) struct btrfs_block_group_item bgi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) struct btrfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
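	/*
	 * Take the block group lock so used and flags are sampled
	 * consistently; the item key is (block group start,
	 * BLOCK_GROUP_ITEM_KEY, block group length).
	 */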
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) spin_lock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) btrfs_set_stack_block_group_used(&bgi, block_group->used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) btrfs_set_stack_block_group_chunk_objectid(&bgi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) BTRFS_FIRST_CHUNK_TREE_OBJECTID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) key.objectid = block_group->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) key.offset = block_group->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) spin_unlock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) root = fs_info->extent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) struct btrfs_fs_info *fs_info = trans->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) struct btrfs_block_group *block_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) if (!trans->can_flush_pending_bgs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) while (!list_empty(&trans->new_bgs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) block_group = list_first_entry(&trans->new_bgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) struct btrfs_block_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) bg_list);
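		/*
		 * If a previous iteration failed (and aborted the
		 * transaction), still walk the remaining entries so their
		 * delayed-ref reservation is released and they are unlinked
		 * from the list below.
		 */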
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) index = btrfs_bg_flags_to_raid_index(block_group->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) ret = insert_block_group_item(trans, block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) btrfs_abort_transaction(trans, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) ret = btrfs_finish_chunk_alloc(trans, block_group->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) block_group->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) btrfs_abort_transaction(trans, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) add_block_group_free_space(trans, block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) * If we restriped during balance, we may have added a new raid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) * type, so now add the sysfs entries when it is safe to do so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * We don't have to worry about locking here as it's handled in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) * btrfs_sysfs_add_block_group_type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) if (block_group->space_info->block_group_kobjs[index] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) btrfs_sysfs_add_block_group_type(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) /* Already aborted the transaction if it failed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) btrfs_delayed_refs_rsv_release(fs_info, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) list_del_init(&block_group->bg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) btrfs_trans_release_chunk_metadata(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) u64 type, u64 chunk_offset, u64 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) struct btrfs_fs_info *fs_info = trans->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) struct btrfs_block_group *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) btrfs_set_log_full_commit(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) if (!cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) cache->length = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) set_free_space_tree_thresholds(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) cache->used = bytes_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) cache->flags = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) cache->last_byte_to_unpin = (u64)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) cache->cached = BTRFS_CACHE_FINISHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) cache->needs_free_space = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) ret = exclude_super_stripes(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) /* We may have excluded something, so call this just in case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) btrfs_free_excluded_extents(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) btrfs_put_block_group(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) add_new_free_space(cache, chunk_offset, chunk_offset + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) btrfs_free_excluded_extents(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) #ifdef CONFIG_BTRFS_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) if (btrfs_should_fragment_free_space(cache)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) u64 new_bytes_used = size - bytes_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
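		/*
		 * Fragmenting removes roughly every other chunk of the new
		 * block group's free space (see fragment_free_space()), so
		 * account about half of the previously free bytes as used in
		 * the space_info update below.
		 */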
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) bytes_used += new_bytes_used >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) fragment_free_space(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * Ensure the corresponding space_info object is created and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) * assigned to our block group. We want our bg to be added to the rbtree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) * with its ->space_info set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) ASSERT(cache->space_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) ret = btrfs_add_block_group_cache(fs_info, cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) btrfs_remove_free_space_cache(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) btrfs_put_block_group(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) * Now that our block group has its ->space_info set and is inserted in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) * the rbtree, update the space info's counters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) trace_btrfs_add_block_group(fs_info, cache, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) cache->bytes_super, &cache->space_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) btrfs_update_global_block_rsv(fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) link_block_group(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) list_add_tail(&cache->bg_list, &trans->new_bgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) trans->delayed_ref_updates++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) btrfs_update_delayed_refs_rsv(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) set_avail_alloc_bits(fs_info, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) * Mark one block group RO, can be called several times for the same block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) * group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) * @cache: the destination block group
 * @do_chunk_alloc:	whether we need to do chunk pre-allocation, to ensure
 *			there is still some free space left after marking this
 *			block group read-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) bool do_chunk_alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) struct btrfs_fs_info *fs_info = cache->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) struct btrfs_trans_handle *trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) u64 alloc_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) trans = btrfs_join_transaction(fs_info->extent_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) if (IS_ERR(trans))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) return PTR_ERR(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) /*
 * We're not allowed to set block groups read-only after the dirty
 * block group cache has started writing.  If it already started,
 * back off and let this transaction commit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) mutex_lock(&fs_info->ro_block_group_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) u64 transid = trans->transid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) mutex_unlock(&fs_info->ro_block_group_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) ret = btrfs_wait_for_commit(fs_info, transid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) if (do_chunk_alloc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) * If we are changing raid levels, try to allocate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) * corresponding block group with the new raid level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) if (alloc_flags != cache->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) ret = btrfs_chunk_alloc(trans, alloc_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) CHUNK_ALLOC_FORCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) /*
 * ENOSPC is allowed here: we may have enough space
 * already allocated at the new raid level to carry on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) if (ret == -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)
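	/*
	 * Try to mark the block group read-only.  If that fails because the
	 * space_info would be left without enough free space, allocate a new
	 * chunk with the current profile and retry below (only when
	 * @do_chunk_alloc was requested).  -ETXTBSY is a hard failure that
	 * extra space cannot fix, so bail out directly in that case.
	 */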
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) ret = inc_block_group_ro(cache, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) if (!do_chunk_alloc || ret == -ETXTBSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) ret = inc_block_group_ro(cache, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) if (ret == -ETXTBSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) mutex_lock(&fs_info->chunk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) check_system_chunk(trans, alloc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) mutex_unlock(&fs_info->chunk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) unlock_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) mutex_unlock(&fs_info->ro_block_group_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) struct btrfs_space_info *sinfo = cache->space_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) u64 num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) BUG_ON(!cache->ro);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) spin_lock(&sinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) spin_lock(&cache->lock);
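	/*
	 * The free portion of the block group (everything not used,
	 * reserved, pinned or covered by superblocks) is what was accounted
	 * as read-only, so give exactly that amount back once the last
	 * read-only reference is dropped.
	 */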
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (!--cache->ro) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) num_bytes = cache->length - cache->reserved -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) cache->pinned - cache->bytes_super - cache->used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) sinfo->bytes_readonly -= num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) list_del_init(&cache->ro_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) spin_unlock(&cache->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) spin_unlock(&sinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) static int update_block_group_item(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) struct btrfs_block_group *cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) struct btrfs_fs_info *fs_info = trans->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) struct btrfs_root *root = fs_info->extent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) unsigned long bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) struct btrfs_block_group_item bgi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) key.objectid = cache->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) key.offset = cache->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) btrfs_set_stack_block_group_used(&bgi, cache->used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) btrfs_set_stack_block_group_chunk_objectid(&bgi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) BTRFS_FIRST_CHUNK_TREE_OBJECTID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) btrfs_set_stack_block_group_flags(&bgi, cache->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) btrfs_mark_buffer_dirty(leaf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) static int cache_save_setup(struct btrfs_block_group *block_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) struct btrfs_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) struct btrfs_fs_info *fs_info = block_group->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) struct btrfs_root *root = fs_info->tree_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) struct inode *inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) struct extent_changeset *data_reserved = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) u64 alloc_hint = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) int dcs = BTRFS_DC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) u64 num_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) int retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) * If this block group is smaller than 100MiB, don't bother caching it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) if (block_group->length < (100 * SZ_1M)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) spin_lock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) block_group->disk_cache_state = BTRFS_DC_WRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) spin_unlock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) if (TRANS_ABORTED(trans))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) inode = lookup_free_space_inode(block_group, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) ret = PTR_ERR(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) if (IS_ERR(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) BUG_ON(retries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) retries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) if (block_group->ro)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) ret = create_free_space_inode(trans, block_group, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) * We want to set the generation to 0, that way if anything goes wrong
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) * from here on out we know not to trust this cache when we load up next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) * time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) BTRFS_I(inode)->generation = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) ret = btrfs_update_inode(trans, root, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) * Theoretically we could recover from this by simply setting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) * super cache generation to 0 so we know to invalidate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) * cache, but then we'd have to keep track of the block groups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) * that fail this way so we know we _have_ to reset this cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) * before the next commit or risk reading a stale cache. So, to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) * limit our exposure to horrible edge cases, let's just abort the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) * transaction; this only happens in really bad situations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) * anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) btrfs_abort_transaction(trans, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) /* We've already set up this transaction, go ahead and exit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) if (block_group->cache_generation == trans->transid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) i_size_read(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) dcs = BTRFS_DC_SETUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) if (i_size_read(inode) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) ret = btrfs_check_trunc_cache_free_space(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) &fs_info->global_block_rsv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) spin_lock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) if (block_group->cached != BTRFS_CACHE_FINISHED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) !btrfs_test_opt(fs_info, SPACE_CACHE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) * Don't bother trying to write stuff out _if_
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) * a) we're not cached,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) * b) we're mounted with the nospace_cache option,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) * c) we're using the v2 space cache (FREE_SPACE_TREE).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) dcs = BTRFS_DC_WRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) spin_unlock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) spin_unlock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) * We hit an ENOSPC when setting up the cache in this transaction, so just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) * skip the setup; we've already cleared the cache and are safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) ret = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) * Try to preallocate enough space based on how big the block group is.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) * Keep in mind this has to include any pinned space which could end up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) * taking up quite a bit since it's not folded into the other space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) * cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) */
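^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) * Despite the name, num_pages ends up as a byte count: 16 pages worth of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) * space for every 256MiB of block group, with a minimum of 16 pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) */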
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) num_pages = div_u64(block_group->length, SZ_256M);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) if (!num_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) num_pages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) num_pages *= 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) num_pages *= PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) num_pages, num_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) &alloc_hint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) * Our cache requires contiguous chunks so that we don't modify a bunch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) * of metadata or split extents when writing the cache out, which means
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) * we can hit ENOSPC if we are heavily fragmented in addition to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) * normal out of space conditions. So if we hit this, just skip setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) * up any other block groups for this transaction; maybe we'll unpin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) * enough space the next time around.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) dcs = BTRFS_DC_SETUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) else if (ret == -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) spin_lock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) if (!ret && dcs == BTRFS_DC_SETUP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) block_group->cache_generation = trans->transid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) block_group->disk_cache_state = dcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) spin_unlock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) extent_changeset_free(data_reserved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
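^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) * Run cache_save_setup() for every dirty block group whose v1 free space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) * cache is still in the BTRFS_DC_CLEAR state so that its cache inode gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) * set up. This is a no-op if there are no dirty block groups or the v1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) * space cache (SPACE_CACHE) is not enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) */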
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) struct btrfs_fs_info *fs_info = trans->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) struct btrfs_block_group *cache, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) struct btrfs_transaction *cur_trans = trans->transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) if (list_empty(&cur_trans->dirty_bgs) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) !btrfs_test_opt(fs_info, SPACE_CACHE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) /* Could add new block groups, use _safe just in case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) dirty_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) if (cache->disk_cache_state == BTRFS_DC_CLEAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) cache_save_setup(cache, trans, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) * Transaction commit does final block group cache writeback during a critical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) * section where nothing is allowed to change the FS. This is required in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) * order for the cache to actually match the block group, but can introduce a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) * lot of latency into the commit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) * There's a chance we'll have to redo some of it if the block group changes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) * again during the commit, but it greatly reduces the commit latency by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) * getting rid of the easy block groups while we're still allowing others to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) * join the commit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) struct btrfs_fs_info *fs_info = trans->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) struct btrfs_block_group *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) struct btrfs_transaction *cur_trans = trans->transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) int should_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) struct btrfs_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) LIST_HEAD(dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) struct list_head *io = &cur_trans->io_bgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) int num_started = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) int loops = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) spin_lock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) if (list_empty(&cur_trans->dirty_bgs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) spin_unlock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) list_splice_init(&cur_trans->dirty_bgs, &dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) spin_unlock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) /* Make sure all the block groups on our dirty list actually exist */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) btrfs_create_pending_block_groups(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) * cache_write_mutex is here only to save us from balance or automatic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) * removal of empty block groups deleting this block group while we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) * writing out the cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) mutex_lock(&trans->transaction->cache_write_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) while (!list_empty(&dirty)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) bool drop_reserve = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) cache = list_first_entry(&dirty, struct btrfs_block_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) dirty_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) * This can happen if something re-dirties a block group that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) * is already under IO. Just wait for it to finish and then do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) * it all again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) if (!list_empty(&cache->io_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) list_del_init(&cache->io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) btrfs_wait_cache_io(trans, cache, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) btrfs_put_block_group(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) * btrfs_wait_cache_io uses the cache->dirty_list to decide if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) * it should update the cache_state. Don't delete until after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) * we wait.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) * Since we're not running in the commit critical section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) * we need the dirty_bgs_lock to protect from update_block_group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) spin_lock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) list_del_init(&cache->dirty_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) spin_unlock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) should_put = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) cache_save_setup(cache, trans, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) if (cache->disk_cache_state == BTRFS_DC_SETUP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) cache->io_ctl.inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) ret = btrfs_write_out_cache(trans, cache, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) if (ret == 0 && cache->io_ctl.inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) num_started++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) should_put = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) * The cache_write_mutex protects the io_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) * also refer to the definition of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) * btrfs_transaction::io_bgs for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) list_add_tail(&cache->io_list, io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) * If we failed to write the cache, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) * generation will be bad and life goes on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) ret = update_block_group_item(trans, path, cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) * Our block group might still be attached to the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) * of new block groups in the transaction handle of some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) * other task (struct btrfs_trans_handle->new_bgs). This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) * means its block group item isn't yet in the extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) * tree. If this happens ignore the error, as we will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) * try again later in the critical section of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) * transaction commit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) if (ret == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) spin_lock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) if (list_empty(&cache->dirty_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) list_add_tail(&cache->dirty_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) &cur_trans->dirty_bgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) btrfs_get_block_group(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) drop_reserve = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) spin_unlock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) } else if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) btrfs_abort_transaction(trans, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) /* If it's not on the io list, we need to put the block group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) if (should_put)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) btrfs_put_block_group(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) if (drop_reserve)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) btrfs_delayed_refs_rsv_release(fs_info, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) * Avoid blocking other tasks for too long. It might even save
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) * us from writing caches for block groups that are going to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) * removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) mutex_unlock(&trans->transaction->cache_write_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) mutex_lock(&trans->transaction->cache_write_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) mutex_unlock(&trans->transaction->cache_write_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) * Go through delayed refs for all the stuff we've just kicked off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) * and then loop back (just once)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) ret = btrfs_run_delayed_refs(trans, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) if (!ret && loops == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) loops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) spin_lock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) list_splice_init(&cur_trans->dirty_bgs, &dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) * dirty_bgs_lock protects us from concurrent block group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) * deletes too (not just cache_write_mutex).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) if (!list_empty(&dirty)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) spin_unlock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) spin_unlock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) spin_lock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) list_splice_init(&dirty, &cur_trans->dirty_bgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) spin_unlock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
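^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) * Write out the remaining dirty block groups during the critical section of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) * the transaction commit: set up their free space caches, run the delayed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) * refs, write the caches out and update the block group items, then wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) * for any cache IO (including entries left on io_bgs) to finish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) */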
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) struct btrfs_fs_info *fs_info = trans->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) struct btrfs_block_group *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) struct btrfs_transaction *cur_trans = trans->transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) int should_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) struct list_head *io = &cur_trans->io_bgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) int num_started = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) * Even though we are in the critical section of the transaction commit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) * we can still have concurrent tasks adding elements to this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) * transaction's list of dirty block groups. These tasks correspond to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) * endio free space workers started when writeback finishes for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) * allocate new block groups as a result of COWing nodes of the root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) * tree when updating the free space inode. The writeback for the space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) * caches is triggered by an earlier call to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) * btrfs_start_dirty_block_groups() and iterations of the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) * loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) * Also we want to do the cache_save_setup first and then run the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) * delayed refs to make sure we have the best chance at doing this all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) * in one shot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) spin_lock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) while (!list_empty(&cur_trans->dirty_bgs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) cache = list_first_entry(&cur_trans->dirty_bgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) struct btrfs_block_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) dirty_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) * This can happen if cache_save_setup re-dirties a block group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) * that is already under IO. Just wait for it to finish and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) * then do it all again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) if (!list_empty(&cache->io_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) spin_unlock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) list_del_init(&cache->io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) btrfs_wait_cache_io(trans, cache, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) btrfs_put_block_group(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) spin_lock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) * Don't remove from the dirty list until after we've waited on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) * any pending IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) list_del_init(&cache->dirty_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) spin_unlock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) should_put = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) cache_save_setup(cache, trans, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) ret = btrfs_run_delayed_refs(trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) (unsigned long) -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) cache->io_ctl.inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) ret = btrfs_write_out_cache(trans, cache, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) if (ret == 0 && cache->io_ctl.inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) num_started++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) should_put = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) list_add_tail(&cache->io_list, io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) * If we failed to write the cache, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) * generation will be bad and life goes on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) ret = update_block_group_item(trans, path, cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) * One of the free space endio workers might have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) * created a new block group while updating a free space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) * cache's inode (at inode.c:btrfs_finish_ordered_io())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) * and hasn't released its transaction handle yet, in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) * which case the new block group is still attached to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) * its transaction handle and its creation has not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) * finished yet (no block group item in the extent tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) * yet, etc). If this is the case, wait for all free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) * space endio workers to finish and retry. This is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) * very rare case so no need for a more efficient and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) * complex approach.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) if (ret == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) wait_event(cur_trans->writer_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) atomic_read(&cur_trans->num_writers) == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) ret = update_block_group_item(trans, path, cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) btrfs_abort_transaction(trans, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) /* If it's not on the io list, we need to put the block group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) if (should_put)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) btrfs_put_block_group(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) btrfs_delayed_refs_rsv_release(fs_info, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) spin_lock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) spin_unlock(&cur_trans->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) * Refer to the definition of the io_bgs member for details on why it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) * safe to use it without any locking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) while (!list_empty(io)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) cache = list_first_entry(io, struct btrfs_block_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) list_del_init(&cache->io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) btrfs_wait_cache_io(trans, cache, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) btrfs_put_block_group(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856)
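^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) * Adjust the used bytes accounting for the block group(s) covering the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) * range [bytenr, bytenr + num_bytes): update the super block and space_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) * counters, pin the range when freeing, mark the affected block groups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) * dirty and, if a group ends up with zero used bytes, mark it unused so it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) * can be removed later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) */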
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) int btrfs_update_block_group(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) u64 bytenr, u64 num_bytes, int alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) struct btrfs_fs_info *info = trans->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) struct btrfs_block_group *cache = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) u64 total = num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) u64 old_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) u64 byte_in_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) int factor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) /* Block accounting for super block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) spin_lock(&info->delalloc_root_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) old_val = btrfs_super_bytes_used(info->super_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) if (alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) old_val += num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) old_val -= num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) btrfs_set_super_bytes_used(info->super_copy, old_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) spin_unlock(&info->delalloc_root_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) while (total) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) cache = btrfs_lookup_block_group(info, bytenr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) if (!cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) factor = btrfs_bg_type_to_factor(cache->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) * If this block group has free space cache written out, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) * need to make sure to load it if we are removing space. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) * is because we need the unpinning stage to actually add the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) * space back to the block group, otherwise we will leak space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) if (!alloc && !btrfs_block_group_done(cache))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) btrfs_cache_block_group(cache, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) byte_in_group = bytenr - cache->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) WARN_ON(byte_in_group > cache->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) spin_lock(&cache->space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) spin_lock(&cache->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) if (btrfs_test_opt(info, SPACE_CACHE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) cache->disk_cache_state < BTRFS_DC_CLEAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) cache->disk_cache_state = BTRFS_DC_CLEAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) old_val = cache->used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) num_bytes = min(total, cache->length - byte_in_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) if (alloc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) old_val += num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) cache->used = old_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) cache->reserved -= num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) cache->space_info->bytes_reserved -= num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) cache->space_info->bytes_used += num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) cache->space_info->disk_used += num_bytes * factor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) spin_unlock(&cache->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) spin_unlock(&cache->space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) old_val -= num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) cache->used = old_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) cache->pinned += num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) btrfs_space_info_update_bytes_pinned(info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) cache->space_info, num_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) cache->space_info->bytes_used -= num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) cache->space_info->disk_used -= num_bytes * factor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) spin_unlock(&cache->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) spin_unlock(&cache->space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) __btrfs_mod_total_bytes_pinned(cache->space_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) num_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) set_extent_dirty(&trans->transaction->pinned_extents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) bytenr, bytenr + num_bytes - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) GFP_NOFS | __GFP_NOFAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) spin_lock(&trans->transaction->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) if (list_empty(&cache->dirty_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) list_add_tail(&cache->dirty_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) &trans->transaction->dirty_bgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) trans->delayed_ref_updates++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) btrfs_get_block_group(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) spin_unlock(&trans->transaction->dirty_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) * No longer have used bytes in this block group, queue it for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) * deletion. We do this after adding the block group to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) * dirty list to avoid races between cleaner kthread and space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) * cache writeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) if (!alloc && old_val == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) if (!btrfs_test_opt(info, DISCARD_ASYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) btrfs_mark_bg_unused(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) btrfs_put_block_group(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) total -= num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) bytenr += num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) /* Modified block groups are accounted for in the delayed_refs_rsv. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) btrfs_update_delayed_refs_rsv(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) * btrfs_add_reserved_bytes - update the block_group and space info counters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) * @cache: The cache we are manipulating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) * @ram_bytes: The number of bytes of file content, which will be the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) * as @num_bytes except for the compression path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) * @num_bytes: The number of bytes in question
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) * @delalloc: Whether the blocks are allocated for a delalloc write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) * This is called by the allocator when it reserves space. If the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) * group has become read-only we cannot make the reservation and return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) * -EAGAIN; otherwise this function always succeeds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) u64 ram_bytes, u64 num_bytes, int delalloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) struct btrfs_space_info *space_info = cache->space_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) spin_lock(&space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) spin_lock(&cache->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) if (cache->ro) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) cache->reserved += num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) space_info->bytes_reserved += num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) trace_btrfs_space_reservation(cache->fs_info, "space_info",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) space_info->flags, num_bytes, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) btrfs_space_info_update_bytes_may_use(cache->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) space_info, -ram_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) if (delalloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) cache->delalloc_bytes += num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) * Compression can use less space than we reserved, so wake
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) * tickets if that happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) if (num_bytes < ram_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) btrfs_try_granting_tickets(cache->fs_info, space_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) spin_unlock(&cache->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) spin_unlock(&space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) * btrfs_free_reserved_bytes - update the block_group and space info counters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) * @cache: The cache we are manipulating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) * @num_bytes: The number of bytes in question
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) * @delalloc: The blocks are allocated for the delalloc write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) * This is called by somebody who is freeing space that was never actually used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) * on disk. For example, if you reserve some space for a new leaf in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) * transaction A and free that leaf before transaction A commits, you call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) * this to clear the reservation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) u64 num_bytes, int delalloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) struct btrfs_space_info *space_info = cache->space_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) spin_lock(&space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) spin_lock(&cache->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) if (cache->ro)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) space_info->bytes_readonly += num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) cache->reserved -= num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) space_info->bytes_reserved -= num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) space_info->max_extent_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) if (delalloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) cache->delalloc_bytes -= num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) spin_unlock(&cache->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) btrfs_try_granting_tickets(cache->fs_info, space_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) spin_unlock(&space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039)
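^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) * Make the next should_alloc_chunk() check allocate a new chunk for every
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) * metadata space_info, regardless of the usual usage thresholds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) */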
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) static void force_metadata_allocation(struct btrfs_fs_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) struct list_head *head = &info->space_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) struct btrfs_space_info *found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) list_for_each_entry(found, head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) found->force_alloc = CHUNK_ALLOC_FORCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050)
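^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) * Decide whether a new chunk should be allocated for @sinfo, based on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) * requested @force level and how much of the space already allocated to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) * this profile is in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) */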
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) struct btrfs_space_info *sinfo, int force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) u64 bytes_used = btrfs_space_info_used(sinfo, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) u64 thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) if (force == CHUNK_ALLOC_FORCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) * In limited mode, we want to have some free space available, up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) * about 1% of the FS size (but at least 64MiB).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) if (force == CHUNK_ALLOC_LIMITED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) thresh = btrfs_super_total_bytes(fs_info->super_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) if (sinfo->total_bytes - bytes_used < thresh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071)
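^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) * Otherwise only allocate once used space (plus 2MiB of slack) reaches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) * about 80% of the space already allocated to this profile.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) */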
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) * If force is CHUNK_ALLOC_FORCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) * - return 1 if it successfully allocates a chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) * - return errors including -ENOSPC otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) * If force is NOT CHUNK_ALLOC_FORCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) * - return 0 if it doesn't need to allocate a new chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) * - return 1 if it successfully allocates a chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) * - return errors including -ENOSPC otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) enum btrfs_chunk_alloc_enum force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) struct btrfs_fs_info *fs_info = trans->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) struct btrfs_space_info *space_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) bool wait_for_alloc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) bool should_alloc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) /* Don't re-enter if we're already allocating a chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) if (trans->allocating_chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) space_info = btrfs_find_space_info(fs_info, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) ASSERT(space_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) spin_lock(&space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) if (force < space_info->force_alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) force = space_info->force_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) should_alloc = should_alloc_chunk(fs_info, space_info, force);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) if (space_info->full) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) /* No more free physical space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) if (should_alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) ret = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) spin_unlock(&space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) } else if (!should_alloc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) spin_unlock(&space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) } else if (space_info->chunk_alloc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) * Someone is already allocating, so we need to block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) * until they are finished and then loop to recheck
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) * whether we should continue with our allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) * attempt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) wait_for_alloc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) spin_unlock(&space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) mutex_lock(&fs_info->chunk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) mutex_unlock(&fs_info->chunk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) /* Proceed with allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) space_info->chunk_alloc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) wait_for_alloc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) spin_unlock(&space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) } while (wait_for_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) mutex_lock(&fs_info->chunk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) trans->allocating_chunk = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) * If we have mixed data/metadata chunks we want to make sure we keep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) * allocating mixed chunks instead of individual chunks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) if (btrfs_mixed_space_info(space_info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) * If we're allocating a data chunk, go ahead and make sure that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) * also keep a reasonable number of metadata chunks allocated in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) * FS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) fs_info->data_chunk_allocations++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) if (!(fs_info->data_chunk_allocations %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) fs_info->metadata_ratio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) force_metadata_allocation(fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) * Check if we have enough space in the SYSTEM chunk because we may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) * need to update device items.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) check_system_chunk(trans, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) ret = btrfs_alloc_chunk(trans, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) trans->allocating_chunk = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) spin_lock(&space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) if (ret == -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) space_info->full = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) space_info->max_extent_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) space_info->chunk_alloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) spin_unlock(&space_info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) mutex_unlock(&fs_info->chunk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) * When we allocate a new chunk we reserve space in the chunk block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) * reserve to make sure we can COW nodes/leaves in the chunk tree or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) * add new nodes/leaves to it if we end up needing to do it when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) * inserting the chunk item and updating device items as part of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) * second phase of chunk allocation, performed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) * large number of new block groups to create in our transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) * handle's new_bgs list to avoid exhausting the chunk block reserve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) * in extreme cases - like having a single transaction create many new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) * block groups when starting to write out the free space caches of all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) * the block groups that were made dirty during the lifetime of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) * transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) btrfs_create_pending_block_groups(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) }
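
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * how a caller might drive btrfs_chunk_alloc() and interpret its return
 * values as documented above. The helper name is hypothetical.
 */
static inline int example_prealloc_data_chunk(struct btrfs_trans_handle *trans)
{
	u64 flags = btrfs_get_alloc_profile(trans->fs_info,
					    BTRFS_BLOCK_GROUP_DATA);
	int ret;

	ret = btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_NO_FORCE);
	if (ret < 0)
		return ret;	/* -ENOSPC or another error */

	/* ret == 0: no chunk was needed; ret == 1: a new chunk was allocated */
	return 0;
}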
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) u64 num_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) if (!num_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) num_dev = fs_info->fs_devices->rw_devices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) return num_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) }
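
/*
 * Worked example (editor's illustration, hedged on the exact btrfs_raid_array
 * contents): a RAID1 chunk has a fixed devs_max of 2, so num_dev is 2; a
 * striped profile such as RAID0 reports devs_max == 0, so the estimate falls
 * back to fs_info->fs_devices->rw_devices, i.e. every writable device.
 */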
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) * Reserve space in the system space_info for allocating or removing a chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) struct btrfs_fs_info *fs_info = trans->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) struct btrfs_space_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) u64 left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) u64 thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) u64 num_devs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) * Needed because we can end up allocating a system chunk and need the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) * space reservation in the chunk block reserve to be atomic and race free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) lockdep_assert_held(&fs_info->chunk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) spin_lock(&info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) left = info->total_bytes - btrfs_space_info_used(info, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) spin_unlock(&info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) num_devs = get_profile_num_devs(fs_info, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) /* num_devs device items to update and 1 chunk item to add or remove */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) thresh = btrfs_calc_metadata_size(fs_info, num_devs) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) btrfs_calc_insert_metadata_size(fs_info, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) left, thresh, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) btrfs_dump_space_info(fs_info, info, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) if (left < thresh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) u64 flags = btrfs_system_alloc_profile(fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) * Ignore failure to create system chunk. We might end up not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) * needing it, as we might not need to COW all nodes/leaves from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) * the paths we visit in the chunk tree (they were already COWed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) * or created in the current transaction, for example).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) ret = btrfs_alloc_chunk(trans, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) ret = btrfs_block_rsv_add(fs_info->chunk_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) &fs_info->chunk_block_rsv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) thresh, BTRFS_RESERVE_NO_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) trans->chunk_bytes_reserved += thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) }
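
/*
 * Worked example (editor's illustration, based on the assumption that
 * btrfs_calc_metadata_size() reserves nodesize * BTRFS_MAX_LEVEL per item and
 * btrfs_calc_insert_metadata_size() twice that to cover splits): with a
 * 16 KiB nodesize and num_devs == 2, thresh = 2 * 128 KiB + 256 KiB = 512 KiB.
 * If the SYSTEM space_info has less unused space than that, a new SYSTEM chunk
 * is allocated before the chunk block reserve is topped up.
 */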
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) struct btrfs_block_group *block_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) u64 last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) block_group = btrfs_lookup_first_block_group(info, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) while (block_group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) btrfs_wait_block_group_cache_done(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) spin_lock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) if (block_group->iref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) spin_unlock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) block_group = btrfs_next_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) if (!block_group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) if (last == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) inode = block_group->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) block_group->iref = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) block_group->inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) spin_unlock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) ASSERT(block_group->io_ctl.inode == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) last = block_group->start + block_group->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) btrfs_put_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) * Must be called only after stopping all workers, since we could have block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) * group caching kthreads running, and therefore they could race with us if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) * freed the block groups before stopping them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) int btrfs_free_block_groups(struct btrfs_fs_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) struct btrfs_block_group *block_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) struct btrfs_space_info *space_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) struct btrfs_caching_control *caching_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) struct rb_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) down_write(&info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) while (!list_empty(&info->caching_block_groups)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) caching_ctl = list_entry(info->caching_block_groups.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) struct btrfs_caching_control, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) list_del(&caching_ctl->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) btrfs_put_caching_control(caching_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) up_write(&info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) spin_lock(&info->unused_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) while (!list_empty(&info->unused_bgs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) block_group = list_first_entry(&info->unused_bgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) struct btrfs_block_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) bg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) list_del_init(&block_group->bg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) btrfs_put_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) spin_unlock(&info->unused_bgs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) spin_lock(&info->block_group_cache_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) block_group = rb_entry(n, struct btrfs_block_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) cache_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) rb_erase(&block_group->cache_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) &info->block_group_cache_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) RB_CLEAR_NODE(&block_group->cache_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) spin_unlock(&info->block_group_cache_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) down_write(&block_group->space_info->groups_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) list_del(&block_group->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) up_write(&block_group->space_info->groups_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) * We haven't cached this block group, which means we may still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) * have excluded extents on it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) if (block_group->cached == BTRFS_CACHE_NO ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) block_group->cached == BTRFS_CACHE_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) btrfs_free_excluded_extents(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) btrfs_remove_free_space_cache(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) ASSERT(list_empty(&block_group->dirty_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) ASSERT(list_empty(&block_group->io_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) ASSERT(list_empty(&block_group->bg_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) ASSERT(refcount_read(&block_group->refs) == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) ASSERT(block_group->swap_extents == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) btrfs_put_block_group(block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) spin_lock(&info->block_group_cache_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) spin_unlock(&info->block_group_cache_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) btrfs_release_global_block_rsv(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) while (!list_empty(&info->space_info)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) space_info = list_entry(info->space_info.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) struct btrfs_space_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) * Do not hide this behind enospc_debug; this is actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) * important and indicates a real bug if it happens.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) if (WARN_ON(space_info->bytes_pinned > 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) space_info->bytes_reserved > 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) space_info->bytes_may_use > 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) btrfs_dump_space_info(info, space_info, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) WARN_ON(space_info->reclaim_size > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) list_del(&space_info->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) btrfs_sysfs_remove_space_info(space_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) void btrfs_freeze_block_group(struct btrfs_block_group *cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) atomic_inc(&cache->frozen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) struct btrfs_fs_info *fs_info = block_group->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) struct extent_map_tree *em_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) struct extent_map *em;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) bool cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) spin_lock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) cleanup = (atomic_dec_and_test(&block_group->frozen) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) block_group->removed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) spin_unlock(&block_group->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) if (cleanup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) em_tree = &fs_info->mapping_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) write_lock(&em_tree->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) em = lookup_extent_mapping(em_tree, block_group->start, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) BUG_ON(!em); /* logic error, can't happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) remove_extent_mapping(em_tree, em);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) write_unlock(&em_tree->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) /* once for us and once for the tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) free_extent_map(em);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) free_extent_map(em);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) * We may have left one free space entry, and other tasks trimming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) * this block group may have left one entry each as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) * Free them if any exist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) __btrfs_remove_free_space_cache(block_group->free_space_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) }
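
/*
 * Illustrative sketch (editor's addition, not in the original file): the
 * freeze/unfreeze pair above is meant to bracket work that must not see the
 * block group's chunk mapping removed underneath it, e.g. discard/trim. The
 * helper name below is hypothetical.
 */
static inline void example_work_on_frozen_bg(struct btrfs_block_group *bg)
{
	btrfs_freeze_block_group(bg);
	/* ... operate on the block group while its mapping is pinned ... */
	btrfs_unfreeze_block_group(bg);
}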
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) bool ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) spin_lock(&bg->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) if (bg->ro)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) bg->swap_extents++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) spin_unlock(&bg->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) spin_lock(&bg->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) ASSERT(!bg->ro);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) ASSERT(bg->swap_extents >= amount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) bg->swap_extents -= amount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) spin_unlock(&bg->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) }
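
/*
 * Illustrative sketch (editor's addition, not in the original file): how the
 * two swap-extent helpers above are intended to be paired, e.g. around
 * activating an extent of a swapfile. The helper name and error choice are
 * hypothetical.
 */
static inline int example_account_swap_extent(struct btrfs_block_group *bg)
{
	if (!btrfs_inc_block_group_swap_extents(bg))
		return -EINVAL;	/* block group is read-only, refuse */

	/* ... the extent is now accounted as used by swap ... */

	/* Normally done much later, at swapoff time; shown inline for brevity. */
	btrfs_dec_block_group_swap_extents(bg, 1);
	return 0;
}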