/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_H
#define BTRFS_EXTENT_IO_H

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/fiemap.h>
#include "ulist.h"

/*
 * Flags for bio submission. The high bits indicate the compression
 * type for this bio.
 */
#define EXTENT_BIO_COMPRESSED	1
#define EXTENT_BIO_FLAG_SHIFT	16

enum {
	EXTENT_BUFFER_UPTODATE,
	EXTENT_BUFFER_DIRTY,
	EXTENT_BUFFER_CORRUPT,
	/* Read of this buffer was triggered by readahead */
	EXTENT_BUFFER_READAHEAD,
	EXTENT_BUFFER_TREE_REF,
	EXTENT_BUFFER_STALE,
	EXTENT_BUFFER_WRITEBACK,
	/* Read IO error */
	EXTENT_BUFFER_READ_ERR,
	EXTENT_BUFFER_UNMAPPED,
	EXTENT_BUFFER_IN_TREE,
	/* Write IO error */
	EXTENT_BUFFER_WRITE_ERR,
};

/* These are flags for __process_pages_contig() */
#define PAGE_UNLOCK		(1 << 0)
#define PAGE_CLEAR_DIRTY	(1 << 1)
#define PAGE_SET_WRITEBACK	(1 << 2)
#define PAGE_END_WRITEBACK	(1 << 3)
#define PAGE_SET_PRIVATE2	(1 << 4)
#define PAGE_SET_ERROR		(1 << 5)
#define PAGE_LOCK		(1 << 6)
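
/*
 * Illustrative example (not part of the original header): the PAGE_* values
 * above form a bitmask, so callers combine them with |, e.g. to unlock pages
 * and end writeback on them in one pass:
 *
 *	unsigned long page_ops = PAGE_UNLOCK | PAGE_END_WRITEBACK;
 *
 * This is the shape of the page_ops argument taken by
 * extent_clear_unlock_delalloc() declared later in this header.
 */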

/*
 * page->private values. Every page that is controlled by the extent map has
 * page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
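
/*
 * Worked example (illustrative only): for a bitmap range covering bits
 * 3..10, i.e. start == 3 and nbits == 11:
 *
 *	BIT_BYTE(3)               == 0     (bit 3 lives in byte 0)
 *	BITMAP_FIRST_BYTE_MASK(3) == 0xf8  (bits 3-7 of the first byte)
 *	BITMAP_LAST_BYTE_MASK(11) == 0x07  (bits 8-10, i.e. bits 0-2 of
 *					    the last byte)
 */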

struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;
struct extent_io_tree;

typedef blk_status_t (submit_bio_hook_t)(struct inode *inode, struct bio *bio,
					 int mirror_num,
					 unsigned long bio_flags);

typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
		struct bio *bio, u64 bio_offset);

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;
	spinlock_t refs_lock;
	atomic_t refs;
	atomic_t io_pages;
	int read_mirror;
	struct rcu_head rcu_head;
	pid_t lock_owner;

	int blocking_writers;
	atomic_t blocking_readers;
	bool lock_recursed;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;

	/* Protects write locks */
	rwlock_t lock;

	/*
	 * Readers use write_lock_wq while they wait for the write
	 * lock holders to unlock.
	 */
	wait_queue_head_t write_lock_wq;

	/*
	 * Writers use read_lock_wq while they wait for readers
	 * to unlock.
	 */
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	int spinning_writers;
	atomic_t spinning_readers;
	atomic_t read_locks;
	int write_locks;
	struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared.
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	unsigned int bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};

static inline void extent_changeset_init(struct extent_changeset *changeset)
{
	changeset->bytes_changed = 0;
	ulist_init(&changeset->range_changed);
}

static inline struct extent_changeset *extent_changeset_alloc(void)
{
	struct extent_changeset *ret;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	extent_changeset_init(ret);
	return ret;
}

static inline void extent_changeset_release(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	changeset->bytes_changed = 0;
	ulist_release(&changeset->range_changed);
}

static inline void extent_changeset_free(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	extent_changeset_release(changeset);
	kfree(changeset);
}
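
/*
 * Typical lifecycle (a hedged sketch, not from the original header): a
 * caller allocates a changeset, hands it to an operation that records the
 * ranges it touched, then inspects and frees it:
 *
 *	struct extent_changeset *changeset = extent_changeset_alloc();
 *
 *	if (!changeset)
 *		return -ENOMEM;
 *	// ... pass changeset to an operation that fills it in ...
 *	// ... read changeset->bytes_changed / changeset->range_changed ...
 *	extent_changeset_free(changeset);
 */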

static inline void extent_set_compress_type(unsigned long *bio_flags,
					    int compress_type)
{
	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
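
/*
 * Illustrative round trip (BTRFS_COMPRESS_ZLIB is defined in compression.h,
 * not here): marking a bio as zlib-compressed keeps EXTENT_BIO_COMPRESSED in
 * the low bits while the compression type lives above EXTENT_BIO_FLAG_SHIFT:
 *
 *	unsigned long bio_flags = EXTENT_BIO_COMPRESSED;
 *
 *	extent_set_compress_type(&bio_flags, BTRFS_COMPRESS_ZLIB);
 *	// extent_compress_type(bio_flags) now returns BTRFS_COMPRESS_ZLIB
 */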

struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
					  struct page *page, size_t pg_offset,
					  u64 start, u64 len);

int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);

int __must_check submit_one_bio(struct bio *bio, int mirror_num,
				unsigned long bio_flags);
int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
		      struct bio **bio, unsigned long *bio_flags,
		      unsigned int read_flags, u64 *prev_em_start);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
			      int mode);
int extent_writepages(struct address_space *mapping,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
void extent_readahead(struct readahead_control *rac);
int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
		  u64 start, u64 len);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE	0
#define WAIT_COMPLETE	1
#define WAIT_PAGE_LOCK	2
int read_extent_buffer_pages(struct extent_buffer *eb, int wait,
			     int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);

static inline int num_extent_pages(const struct extent_buffer *eb)
{
	return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
	       (eb->start >> PAGE_SHIFT);
}
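
/*
 * Worked example (illustrative only), assuming 4K pages: a 16K node at
 * eb->start == 12288 spans
 *
 *	(round_up(12288 + 16384, 4096) >> 12) - (12288 >> 12) == 7 - 3 == 4
 *
 * pages, while a 2K subpage buffer contained in a single page yields 1.
 */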

static inline int extent_buffer_uptodate(const struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len);
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start,
			unsigned long len);
int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
				       void __user *dst, unsigned long start,
				       unsigned long len);
void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
					 const void *src);
void write_extent_buffer(const struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);
void copy_extent_buffer_full(const struct extent_buffer *dst,
			     const struct extent_buffer *src);
void copy_extent_buffer(const struct extent_buffer *dst,
			const struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(const struct extent_buffer *dst,
			  unsigned long dst_offset, unsigned long src_offset,
			  unsigned long len);
void memmove_extent_buffer(const struct extent_buffer *dst,
			   unsigned long dst_offset, unsigned long src_offset,
			   unsigned long len);
void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
			   unsigned long pos);
void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
				unsigned long start, unsigned long pos,
				unsigned long len);
void clear_extent_buffer_dirty(const struct extent_buffer *eb);
bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(const struct extent_buffer *eb);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
				  struct page *locked_page,
				  unsigned bits_to_clear,
				  unsigned long page_ops);
struct bio *btrfs_bio_alloc(u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
		      u64 length, u64 logical, struct page *page,
		      unsigned int pg_offset, int mirror_num);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num);

/*
 * When IO fails, either with EIO or because csum verification fails, we
 * try other mirrors that might have a good copy of the data. This
 * io_failure_record is used to record state as we go through all the
 * mirrors. If another mirror has good data, the page is set uptodate
 * and things continue. If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};

blk_status_t btrfs_submit_read_repair(struct inode *inode,
				      struct bio *failed_bio, u64 phy_offset,
				      struct page *page, unsigned int pgoff,
				      u64 start, u64 end, int failed_mirror,
				      submit_bio_hook_t *submit_bio_hook);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
			      struct page *locked_page, u64 *start,
			      u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);

#ifdef CONFIG_BTRFS_DEBUG
void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info);
#else
#define btrfs_extent_buffer_leak_debug_check(fs_info)	do {} while (0)
#endif

#endif