Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

fs/btrfs/block-group.h:
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H

#include "free-space-cache.h"

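/*
 * Assumed semantics (not documented in the original header): writeback state
 * of the block group's on-disk free space cache, tracked in the
 * disk_cache_state field of struct btrfs_block_group below. CLEAR and SETUP
 * drive cache (re)generation; WRITTEN and ERROR record the outcome of
 * writing it out.
 */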
enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN,
	BTRFS_DC_ERROR,
	BTRFS_DC_CLEAR,
	BTRFS_DC_SETUP,
};

/*
 * This describes the state of the block_group for async discard. It is
 * needed because discard works in two passes, with extent discarding
 * prioritized over bitmap discarding. BTRFS_DISCARD_RESET_CURSOR is set when
 * we are resetting between lists, to prevent contention for discard state
 * variables (e.g. discard_cursor).
 */
enum btrfs_discard_state {
	BTRFS_DISCARD_EXTENTS,
	BTRFS_DISCARD_BITMAPS,
	BTRFS_DISCARD_RESET_CURSOR,
};

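/*
 * Progression sketch (illustrative summary of the comment above, not from
 * the original file): the async discard worker moves a block group through
 *
 *	BTRFS_DISCARD_EXTENTS -> BTRFS_DISCARD_BITMAPS -> BTRFS_DISCARD_RESET_CURSOR
 *
 * trimming free extents first, then bitmap ranges, then resetting
 * discard_cursor before the block group is moved between discard lists.
 */
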
/*
 * Control flags for do_chunk_alloc()'s force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try to allocate one if we have very few
 * chunks already allocated.  This is used as part of the clustering code to
 * help make sure we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum btrfs_chunk_alloc_enum {
	CHUNK_ALLOC_NO_FORCE,
	CHUNK_ALLOC_LIMITED,
	CHUNK_ALLOC_FORCE,
};

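/*
 * Usage sketch (illustrative, not from the original file): a caller that
 * only wants a new metadata chunk when space is actually needed would do
 * something like
 *
 *	ret = btrfs_chunk_alloc(trans, btrfs_metadata_alloc_profile(fs_info),
 *				CHUNK_ALLOC_NO_FORCE);
 *
 * while balance-style callers go through btrfs_force_chunk_alloc(), which
 * allocates unconditionally. Both are declared later in this header.
 */
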
struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group *block_group;
	u64 progress;
	refcount_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M

struct btrfs_block_group {
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	spinlock_t lock;
	u64 start;
	u64 length;
	u64 pinned;
	u64 reserved;
	u64 used;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 cache_generation;

	/*
	 * If the free space extent count exceeds this number, convert the block
	 * group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * Used only for delayed data space allocation, because only the data
	 * space allocation and the related metadata updates can span
	 * transactions.
	 */
	struct rw_semaphore data_rwsem;

	/* For raid56, this is the full stripe length, without parity */
	unsigned long full_stripe_len;

	unsigned int ro;
	unsigned int iref:1;
	unsigned int has_caching_ctl:1;
	unsigned int removed:1;

	int disk_cache_state;

	/* Cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;
	u64 last_byte_to_unpin;

	struct btrfs_space_info *space_info;

	/* Free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* Block group cache stuff */
	struct rb_node cache_node;

	/* For block groups in the same raid type */
	struct list_head list;

	refcount_t refs;

	/*
	 * List of struct btrfs_free_cluster for this block group. Today it
	 * will only have one thing on it, but that may change.
	 */
	struct list_head cluster_list;

	/* For delayed block group creation or deletion of empty block groups */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	/*
	 * When non-zero it means the block group's logical address and its
	 * device extents can not be reused for future block group allocations
	 * until the counter goes down to 0. This is to prevent them from being
	 * reused while some task is still using the block group after it was
	 * deleted - we want to make sure they can only be reused for new block
	 * groups after that task is done with the deleted block group.
	 */
	atomic_t frozen;

	/* For discard operations */
	struct list_head discard_list;
	int discard_index;
	u64 discard_eligible_time;
	u64 discard_cursor;
	enum btrfs_discard_state discard_state;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against this
	 * block group's range is created (after it's added to its inode's
	 * root's list of ordered extents) or immediately after the allocation
	 * if it's a metadata extent or fallocate extent (for these cases we
	 * don't create ordered extents).
	 */
	atomic_t reservations;

	/*
	 * Incremented while holding the spinlock *lock* by a task checking if
	 * it can perform a nocow write (incremented if the value for the *ro*
	 * field is 0). Decremented by such tasks once they create an ordered
	 * extent or before that if some error happens before reaching that step.
	 * This is to prevent races between block group relocation and nocow
	 * writes through direct IO.
	 */
	atomic_t nocow_writers;

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Does the block group need to be added to the free space tree?
	 * Protected by free_space_lock.
	 */
	int needs_free_space;

	/*
	 * Number of extents in this block group used for swap files.
	 * All accesses protected by the spinlock 'lock'.
	 */
	int swap_extents;

	/* Record locked full stripes for RAID5/6 block group */
	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
};

static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
{
	return (block_group->start + block_group->length);
}

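/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): a containment check built from the fields above, showing that
 * 'length' makes the end address exclusive.
 */
static inline bool btrfs_block_group_contains(struct btrfs_block_group *bg,
					      u64 bytenr)
{
	return bytenr >= bg->start && bytenr < btrfs_block_group_end(bg);
}
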
static inline bool btrfs_is_block_group_data_only(
					struct btrfs_block_group *block_group)
{
	/*
	 * In mixed mode the fragmentation is expected to be high, lowering the
	 * efficiency, so only proper data block groups are considered.
	 */
	return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
	       !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
}

#ifdef CONFIG_BTRFS_DEBUG
static inline int btrfs_should_fragment_free_space(
		struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache);
void btrfs_get_block_group(struct btrfs_block_group *cache);
void btrfs_put_block_group(struct btrfs_block_group *cache);
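/*
 * Reference pairing sketch (illustrative, not from the original file): the
 * lookup helpers above return a block group with an elevated refcount that
 * the caller must drop with btrfs_put_block_group():
 *
 *	struct btrfs_block_group *bg;
 *
 *	bg = btrfs_lookup_block_group(fs_info, bytenr);
 *	if (bg) {
 *		... use bg ...
 *		btrfs_put_block_group(bg);
 *	}
 */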
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
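/*
 * Nocow writer pairing sketch (illustrative, matching the 'nocow_writers'
 * counter documented in struct btrfs_block_group): a nocow write is only
 * attempted while holding a count on the block group:
 *
 *	if (btrfs_inc_nocow_writers(fs_info, bytenr)) {
 *		... do the nocow write and create the ordered extent ...
 *		btrfs_dec_nocow_writers(fs_info, bytenr);
 *	}
 */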
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes);
int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
int btrfs_cache_block_group(struct btrfs_block_group *cache,
			    int load_cache_only);
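/*
 * Caching sketch (hypothetical helper, not part of the original API): the
 * usual two-step pattern for making sure a block group's free space is
 * fully cached, built from the two declarations above.
 */
static inline int btrfs_example_cache_and_wait(struct btrfs_block_group *cache)
{
	int ret;

	/* Start (or reuse) caching; 0 means a full load, not cache-only. */
	ret = btrfs_cache_block_group(cache, 0);
	if (ret)
		return ret;
	/* Sleep until the caching thread reports FINISHED or ERROR. */
	return btrfs_wait_block_group_cache_done(cache);
}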
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache);
u64 add_new_free_space(struct btrfs_block_group *block_group,
		       u64 start, u64 end);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
				struct btrfs_fs_info *fs_info,
				const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
			   u64 type, u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
			     bool do_chunk_alloc);
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, int alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc);
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc);
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);

static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}

static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}

void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);
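/*
 * Freeze pairing sketch (illustrative, matching the 'frozen' counter
 * documented in struct btrfs_block_group): a task that must keep a possibly
 * deleted block group's logical range from being reused brackets its work:
 *
 *	btrfs_freeze_block_group(cache);
 *	... long-running work against the block group's range ...
 *	btrfs_unfreeze_block_group(cache);
 */
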
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len);
#endif

bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);
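/*
 * Swap extent pairing sketch (illustrative, assumed caller shape): swap file
 * activation takes one reference per extent, failing if the block group is
 * (or is becoming) read-only, and deactivation drops them all at once:
 *
 *	if (!btrfs_inc_block_group_swap_extents(bg))
 *		goto fail;
 *	nr_extents++;
 *	...
 *	btrfs_dec_block_group_swap_extents(bg, nr_extents);
 */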

#endif /* BTRFS_BLOCK_GROUP_H */