/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H

struct extent_changeset;
struct io_failure_record;

/* Bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_UPTODATE		(1U << 1)
#define EXTENT_LOCKED		(1U << 2)
#define EXTENT_NEW		(1U << 3)
#define EXTENT_DELALLOC		(1U << 4)
#define EXTENT_DEFRAG		(1U << 5)
#define EXTENT_BOUNDARY		(1U << 6)
#define EXTENT_NODATASUM	(1U << 7)
#define EXTENT_CLEAR_META_RESV	(1U << 8)
#define EXTENT_NEED_WAIT	(1U << 9)
#define EXTENT_DAMAGED		(1U << 10)
#define EXTENT_NORESERVE	(1U << 11)
#define EXTENT_QGROUP_RESERVED	(1U << 12)
#define EXTENT_CLEAR_DATA_RESV	(1U << 13)
#define EXTENT_DELALLOC_NEW	(1U << 14)
#define EXTENT_DO_ACCOUNTING	(EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING)
/*
 * The bits below redefine bits from above and are used only in the device
 * allocation tree.  They must not reuse EXTENT_LOCKED / EXTENT_BOUNDARY /
 * EXTENT_CLEAR_META_RESV / EXTENT_CLEAR_DATA_RESV, because those have special
 * meaning to the bit manipulation functions.
 */
#define CHUNK_ALLOCATED		EXTENT_DIRTY
#define CHUNK_TRIMMED		EXTENT_DEFRAG
#define CHUNK_STATE_MASK	(CHUNK_ALLOCATED | \
				 CHUNK_TRIMMED)

enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_INODE_IO_FAILURE,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
	IO_TREE_DEVICE_ALLOC_STATE,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;
	u64 dirty_bytes;
	bool track_uptodate;

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

int __init extent_state_cache_init(void);
void __cold extent_state_cache_exit(void);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
void extent_io_tree_release(struct extent_io_tree *tree);
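
/*
 * Usage sketch (illustrative only, not part of this header's API): a tree
 * that is not backed by an inode is initialized with a NULL private_data,
 * as e.g. a device allocation state tree would be.  'fs_info' and 'tree'
 * below are placeholder names.
 *
 *	struct extent_io_tree tree;
 *
 *	extent_io_tree_init(fs_info, &tree, IO_TREE_DEVICE_ALLOC_STATE, NULL);
 *	(set/clear/test bits on byte ranges of the tree)
 *	extent_io_tree_release(&tree);
 */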

int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);

int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, int wake, int delete,
		       struct extent_state **cached, gfp_t mask,
		       struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
				       u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
		u64 start, u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_ATOMIC, NULL);
}
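
/*
 * Usage sketch (illustrative; 'tree', 'start' and 'end' are placeholders):
 * lock a byte range, remember the extent_state in 'cached' to avoid a second
 * tree search, then drop the lock with the cached variant:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, &cached);
 *	(operate on the locked range [start, end], both ends inclusive)
 *	unlock_extent_cached(tree, start, end, &cached);
 */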

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
				    u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
				  u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}
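
/*
 * Usage sketch (illustrative; 'device' stands for a structure that embeds a
 * device allocation state tree): mark a chunk's byte range as allocated and
 * later clear it again, using the CHUNK_* aliases defined above:
 *
 *	set_extent_bits(&device->alloc_state, start, start + num_bytes - 1,
 *			CHUNK_ALLOCATED);
 *	...
 *	clear_extent_bits(&device->alloc_state, start, start + num_bytes - 1,
 *			  CHUNK_ALLOCATED);
 */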

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				  cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
				   u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, unsigned int extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      NULL, cached_state, GFP_NOFS);
}
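
/*
 * Usage sketch (illustrative; 'inode' stands for a struct btrfs_inode whose
 * io_tree has been initialized): mark a byte range as delalloc with no extra
 * bits, keeping a cached extent_state for later lookups:
 *
 *	struct extent_state *cached_state = NULL;
 *
 *	set_extent_delalloc(&inode->io_tree, start, end, 0, &cached_state);
 */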

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
				    u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
				 u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			      GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, unsigned bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, unsigned bits);
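
/*
 * Usage sketch (illustrative; the tree and range names are placeholders):
 * find the next range at or after 'start' that has CHUNK_ALLOCATED set.
 * find_first_extent_bit() returns 0 when a matching range was found and
 * fills in the inclusive [found_start, found_end] bounds:
 *
 *	u64 found_start, found_end;
 *
 *	if (!find_first_extent_bit(&device->alloc_state, start, &found_start,
 *				   &found_end, CHUNK_ALLOCATED, NULL)) {
 *		(process [found_start, found_end])
 *	}
 */
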
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);

/* This should be reworked in the future and put elsewhere. */
struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start);
int set_state_failrec(struct extent_io_tree *tree, u64 start,
		      struct io_failure_record *failrec);
void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
				  u64 end);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);

#endif /* BTRFS_EXTENT_IO_TREE_H */