/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#ifndef BTRFS_VOLUMES_H
#define BTRFS_VOLUMES_H

#include <linux/bio.h>
#include <linux/sort.h>
#include <linux/btrfs.h>
#include "async-thread.h"

#define BTRFS_MAX_DATA_CHUNK_SIZE	(10ULL * SZ_1G)

extern struct mutex uuid_mutex;

#define BTRFS_STRIPE_LEN	SZ_64K

struct btrfs_io_geometry {
	/* remaining bytes before crossing a stripe */
	u64 len;
	/* offset of logical address in chunk */
	u64 offset;
	/* length of single IO stripe */
	u64 stripe_len;
	/* number of stripe where address falls */
	u64 stripe_nr;
	/* offset of address in stripe */
	u64 stripe_offset;
	/* offset of raid56 stripe into the chunk */
	u64 raid56_stripe_offset;
};

/*
 * Use a sequence counter to get consistent device stat data on
 * 32-bit processors.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#include <linux/seqlock.h>
#define __BTRFS_NEED_DEVICE_DATA_ORDERED
#define btrfs_device_data_ordered_init(device)	\
	seqcount_init(&device->data_seqcount)
#else
#define btrfs_device_data_ordered_init(device) do { } while (0)
#endif

#define BTRFS_DEV_STATE_WRITEABLE	(0)
#define BTRFS_DEV_STATE_IN_FS_METADATA	(1)
#define BTRFS_DEV_STATE_MISSING		(2)
#define BTRFS_DEV_STATE_REPLACE_TGT	(3)
#define BTRFS_DEV_STATE_FLUSH_SENT	(4)
#define BTRFS_DEV_STATE_NO_READA	(5)
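
/*
 * Note that these are bit numbers, not mask values: dev_state is queried
 * and updated with the kernel's atomic bitops. A minimal sketch of the
 * intended usage (illustrative only):
 *
 *	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
 *		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
 *	clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
 */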

struct btrfs_device {
	struct list_head dev_list;		/* device_list_mutex */
	struct list_head dev_alloc_list;	/* chunk mutex */
	struct list_head post_commit_list;	/* chunk mutex */
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_fs_info *fs_info;

	struct rcu_string __rcu *name;

	u64 generation;

	struct block_device *bdev;

	/* the mode sent to blkdev_get */
	fmode_t mode;

	unsigned long dev_state;
	blk_status_t last_flush_error;

#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
	seqcount_t data_seqcount;
#endif

	/* the internal btrfs device id */
	u64 devid;

	/* size of the device in memory */
	u64 total_bytes;

	/* size of the device on disk */
	u64 disk_total_bytes;

	/* bytes used */
	u64 bytes_used;

	/* optimal io alignment for this device */
	u32 io_align;

	/* optimal io width for this device */
	u32 io_width;
	/* type and info about this device */
	u64 type;

	/* minimal io size for this device */
	u32 sector_size;

	/* physical drive uuid (or lvm uuid) */
	u8 uuid[BTRFS_UUID_SIZE];

	/*
	 * size of the device on the current transaction
	 *
	 * This value is updated when committing the transaction,
	 * and is protected by the chunk mutex.
	 */
	u64 commit_total_bytes;

	/* bytes used on the current transaction */
	u64 commit_bytes_used;

	/* for sending down flush barriers */
	struct bio *flush_bio;
	struct completion flush_wait;

	/* per-device scrub information */
	struct scrub_ctx *scrub_ctx;

	/* readahead state */
	atomic_t reada_in_flight;
	u64 reada_next;
	struct reada_zone *reada_curr_zone;
	struct radix_tree_root reada_zones;
	struct radix_tree_root reada_extents;

	/*
	 * Disk I/O failure stats. For a detailed description refer to
	 * enum btrfs_dev_stat_values in ioctl.h.
	 */
	int dev_stats_valid;

	/* Counter to record the change of device stats */
	atomic_t dev_stats_ccnt;
	atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];

	struct extent_io_tree alloc_state;

	struct completion kobj_unregister;
	/* For sysfs/FSID/devinfo/devid/ */
	struct kobject devid_kobj;
};

/*
 * If we read these values while holding the lock that protects them, we don't
 * need the following helpers; reading them directly is safe.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)		\
{									\
	u64 size;							\
	unsigned int seq;						\
									\
	do {								\
		seq = read_seqcount_begin(&dev->data_seqcount);		\
		size = dev->name;					\
	} while (read_seqcount_retry(&dev->data_seqcount, seq));	\
	return size;							\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	preempt_disable();						\
	write_seqcount_begin(&dev->data_seqcount);			\
	dev->name = size;						\
	write_seqcount_end(&dev->data_seqcount);			\
	preempt_enable();						\
}
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)		\
{									\
	u64 size;							\
									\
	preempt_disable();						\
	size = dev->name;						\
	preempt_enable();						\
	return size;							\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	preempt_disable();						\
	dev->name = size;						\
	preempt_enable();						\
}
#else
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)		\
{									\
	return dev->name;						\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	dev->name = size;						\
}
#endif

BTRFS_DEVICE_GETSET_FUNCS(total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(disk_total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(bytes_used);
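
/*
 * Each BTRFS_DEVICE_GETSET_FUNCS() invocation above expands to a get/set
 * pair for one u64 member of struct btrfs_device. A minimal usage sketch
 * (illustrative only):
 *
 *	u64 total = btrfs_device_get_total_bytes(device);
 *
 *	btrfs_device_set_total_bytes(device, total + SZ_1G);
 *
 * On 32-bit SMP the getter retries via the seqcount until it sees a
 * consistent value, so a reader never observes a torn 64-bit update.
 */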

enum btrfs_chunk_allocation_policy {
	BTRFS_CHUNK_ALLOC_REGULAR,
};

struct btrfs_fs_devices {
	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
	u8 metadata_uuid[BTRFS_FSID_SIZE];
	bool fsid_change;
	struct list_head fs_list;

	u64 num_devices;
	u64 open_devices;
	u64 rw_devices;
	u64 missing_devices;
	u64 total_rw_bytes;
	u64 total_devices;

	/* Highest generation number of seen devices */
	u64 latest_generation;

	struct block_device *latest_bdev;

	/*
	 * All of the devices in the FS, protected by a mutex so we can
	 * safely walk it to write out the supers without worrying about
	 * adds/removes by the multi-device code. Scrubbing the super block
	 * can kick off writing the supers while holding this mutex.
	 */
	struct mutex device_list_mutex;

	/* List of all devices, protected by device_list_mutex */
	struct list_head devices;

	/*
	 * Devices which can satisfy space allocation. Protected by
	 * chunk_mutex.
	 */
	struct list_head alloc_list;

	struct list_head seed_list;
	bool seeding;

	int opened;

	/*
	 * Set when we find or add a device that doesn't have the
	 * nonrot flag set.
	 */
	bool rotating;

	struct btrfs_fs_info *fs_info;
	/* sysfs kobjects */
	struct kobject fsid_kobj;
	struct kobject *devices_kobj;
	struct kobject *devinfo_kobj;
	struct completion kobj_unregister;

	enum btrfs_chunk_allocation_policy chunk_alloc_policy;
};

#define BTRFS_BIO_INLINE_CSUM_SIZE	64

#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info)	\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)
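
/*
 * Note on the "+ 1" above: the on-disk struct btrfs_chunk embeds its first
 * struct btrfs_stripe, so subtracting sizeof(struct btrfs_chunk) already
 * accounts for one stripe. The division then counts how many additional
 * stripes fit in the remaining item space, and the embedded one is added
 * back.
 */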

/*
 * We need the mirror number and stripe index to be passed around the call
 * chain while we are processing end_io (especially errors). Really, what we
 * need is a btrfs_bio structure that has this info and is properly sized
 * with its stripe array, but we're not there quite yet. We have our own
 * btrfs bioset, and all of the bios we allocate are actually btrfs_io_bios.
 * We'll cram as much of struct btrfs_bio as we can into this over time.
 */
struct btrfs_io_bio {
	unsigned int mirror_num;
	struct btrfs_device *device;
	u64 logical;
	u8 *csum;
	u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
	struct bvec_iter iter;
	/*
	 * This member must come last, bio_alloc_bioset will allocate enough
	 * bytes for the entire btrfs_io_bio but relies on bio being last.
	 */
	struct bio bio;
};

static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio)
{
	return container_of(bio, struct btrfs_io_bio, bio);
}
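
/*
 * A minimal sketch of how the container_of() helper above is typically used
 * from a bio completion callback (illustrative only; my_end_io is a
 * hypothetical name):
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 *
 *		pr_debug("logical %llu mirror %u\n",
 *			 io_bio->logical, io_bio->mirror_num);
 *	}
 *
 * This is only valid for bios allocated from the btrfs bioset, since only
 * those are embedded in a struct btrfs_io_bio.
 */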

static inline void btrfs_io_bio_free_csum(struct btrfs_io_bio *io_bio)
{
	if (io_bio->csum != io_bio->csum_inline) {
		kfree(io_bio->csum);
		io_bio->csum = NULL;
	}
}

struct btrfs_bio_stripe {
	struct btrfs_device *dev;
	u64 physical;
	u64 length; /* only used for discard mappings */
};

struct btrfs_bio {
	refcount_t refs;
	atomic_t stripes_pending;
	struct btrfs_fs_info *fs_info;
	u64 map_type; /* get from map_lookup->type */
	bio_end_io_t *end_io;
	struct bio *orig_bio;
	void *private;
	atomic_t error;
	int max_errors;
	int num_stripes;
	int mirror_num;
	int num_tgtdevs;
	int *tgtdev_map;
	/*
	 * Logical block numbers for the start of each stripe. The last one
	 * or two are for P/Q. These are sorted, so raid_map[0] is the start
	 * of our full stripe.
	 */
	u64 *raid_map;
	struct btrfs_bio_stripe stripes[];
};

struct btrfs_device_info {
	struct btrfs_device *dev;
	u64 dev_offset;
	u64 max_avail;
	u64 total_avail;
};

struct btrfs_raid_attr {
	u8 sub_stripes;		/* sub_stripes info for map */
	u8 dev_stripes;		/* stripes per dev */
	u8 devs_max;		/* max devs to use */
	u8 devs_min;		/* min devs needed */
	u8 tolerated_failures;	/* max tolerated fail devs */
	u8 devs_increment;	/* ndevs has to be a multiple of this */
	u8 ncopies;		/* how many copies of the data there are */
	u8 nparity;		/* number of stripes worth of bytes to store
				 * parity information */
	u8 mindev_error;	/* error code if the minimum device count
				 * is not met */
	const char raid_name[8]; /* name of the raid */
	u64 bg_flag;		/* block group flag of the raid */
};

extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES];

struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	u64 stripe_len;
	int num_stripes;
	int sub_stripes;
	int verified_stripes; /* For mount time dev extent verification */
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))
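
/*
 * map_lookup ends in a flexible array, so the struct and its stripes are
 * allocated in one shot via map_lookup_size(). A minimal allocation sketch
 * (illustrative only; error handling elided):
 *
 *	struct map_lookup *map;
 *
 *	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
 *	map->num_stripes = num_stripes;
 */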

struct btrfs_balance_args;
struct btrfs_balance_progress;
struct btrfs_balance_control {
	struct btrfs_balance_args data;
	struct btrfs_balance_args meta;
	struct btrfs_balance_args sys;

	u64 flags;

	struct btrfs_balance_progress stat;
};

enum btrfs_map_op {
	BTRFS_MAP_READ,
	BTRFS_MAP_WRITE,
	BTRFS_MAP_DISCARD,
	BTRFS_MAP_GET_READ_MIRRORS,
};

static inline enum btrfs_map_op btrfs_op(struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		return BTRFS_MAP_DISCARD;
	case REQ_OP_WRITE:
		return BTRFS_MAP_WRITE;
	default:
		WARN_ON_ONCE(1);
		fallthrough;
	case REQ_OP_READ:
		return BTRFS_MAP_READ;
	}
}

void btrfs_get_bbio(struct btrfs_bio *bbio);
void btrfs_put_bbio(struct btrfs_bio *bbio);
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_bio **bbio_ret, int mirror_num);
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret);
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
			  u64 logical, u64 len,
			  struct btrfs_io_geometry *io_geom);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			   int mirror_num);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder);
struct btrfs_device *btrfs_scan_one_device(const char *path,
					   fmode_t flags, void *holder);
int btrfs_forget_devices(const char *path);
void btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step);
void btrfs_assign_next_active_device(struct btrfs_device *device,
				     struct btrfs_device *this_dev);
struct btrfs_device *btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info,
						  u64 devid,
						  const char *devpath);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid);
void btrfs_free_device(struct btrfs_device *device);
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
		    const char *device_path, u64 devid);
void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size);
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
				       u64 devid, u8 *uuid, u8 *fsid,
				       bool seed);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs);
void btrfs_describe_block_groups(u64 flags, char *buf, u32 size_buf);
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_uuid_scan_kthread(void *data);
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *max_avail);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats);
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans);
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev);
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev);
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
			   u64 logical, u64 len);
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical);
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     u64 chunk_offset, u64 chunk_size);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length);
void btrfs_release_disk_super(struct btrfs_super_block *super);

static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
				      int index)
{
	atomic_inc(dev->dev_stat_values + index);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}

static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
				      int index)
{
	return atomic_read(dev->dev_stat_values + index);
}

static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
						int index)
{
	int ret;

	ret = atomic_xchg(dev->dev_stat_values + index, 0);
	/*
	 * atomic_xchg implies a full memory barrier as per atomic_t.txt:
	 * - RMW operations that have a return value are fully ordered.
	 *
	 * This implicit memory barrier is paired with the smp_rmb() in
	 * btrfs_run_dev_stats().
	 */
	atomic_inc(&dev->dev_stats_ccnt);
	return ret;
}

static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
				      int index, unsigned long val)
{
	atomic_set(dev->dev_stat_values + index, val);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as an index into btrfs_raid_array[].
 */
static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
		return BTRFS_RAID_RAID1C3;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
		return BTRFS_RAID_RAID1C4;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}
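
/*
 * A minimal sketch of the intended lookup pattern (illustrative only):
 *
 *	const struct btrfs_raid_attr *attr;
 *
 *	attr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(flags)];
 *	pr_debug("%s tolerates %d failed devices\n",
 *		 attr->raid_name, attr->tolerated_failures);
 */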

void btrfs_commit_device_sizes(struct btrfs_transaction *trans);

struct list_head * __attribute_const__ btrfs_get_fs_uuids(void);
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
			       struct btrfs_device *failing_dev);
void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
			       struct block_device *bdev,
			       const char *device_path);

int btrfs_bg_type_to_factor(u64 flags);
const char *btrfs_bg_type_to_raid_name(u64 flags);
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);

#endif