Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards.

drivers/md/dm-thin-metadata.h

/*
 * Copyright (C) 2010-2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_THIN_METADATA_H
#define DM_THIN_METADATA_H

#include "persistent-data/dm-block-manager.h"
#include "persistent-data/dm-space-map.h"
#include "persistent-data/dm-space-map-metadata.h"

#define THIN_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE

/*
 * The metadata device is currently limited in size.
 */
#define THIN_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS

/*
 * A metadata device larger than 16GB triggers a warning.
 */
#define THIN_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
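
/*
 * Illustrative arithmetic, assuming the usual SECTOR_SHIFT of 9 (512-byte
 * sectors): 1024 * 1024 * 1024 >> 9 == 2097152 sectors per GiB, so the
 * warning threshold works out to 16 * 2097152 == 33554432 sectors (16 GiB).
 */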

/*----------------------------------------------------------------*/

/*
 * Thin metadata superblock flags.
 */
#define THIN_METADATA_NEEDS_CHECK_FLAG (1 << 0)

struct dm_pool_metadata;
struct dm_thin_device;

/*
 * Device identifier
 */
typedef uint64_t dm_thin_id;

/*
 * Reopens or creates a new, empty metadata volume.
 */
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
					       sector_t data_block_size,
					       bool format_device);

int dm_pool_metadata_close(struct dm_pool_metadata *pmd);
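
/*
 * Illustrative sketch only (not part of the upstream header): a hypothetical
 * helper showing the expected open/close lifecycle of the declarations above.
 * It assumes dm_pool_metadata_open() returns an ERR_PTR() value on failure,
 * which is how callers such as the dm-thin pool target treat it.
 */
static inline int example_open_close_pool(struct block_device *bdev,
					  sector_t data_block_size)
{
	struct dm_pool_metadata *pmd;

	/* Open the metadata volume (pass true to format a new one). */
	pmd = dm_pool_metadata_open(bdev, data_block_size, false);
	if (IS_ERR(pmd))
		return PTR_ERR(pmd);

	/* ... create/open thin devices, map blocks, commit ... */

	return dm_pool_metadata_close(pmd);
}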

/*
 * Compat feature flags.  Any incompat flags beyond the ones
 * specified below will prevent use of the thin metadata.
 */
#define THIN_FEATURE_COMPAT_SUPP	  0UL
#define THIN_FEATURE_COMPAT_RO_SUPP	  0UL
#define THIN_FEATURE_INCOMPAT_SUPP	  0UL

/*
 * Device creation/deletion.
 */
int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev);

/*
 * An internal snapshot.
 *
 * You can only snapshot a quiesced origin i.e. one that is either
 * suspended or not instanced at all.
 */
int dm_pool_create_snap(struct dm_pool_metadata *pmd, dm_thin_id dev,
			dm_thin_id origin);

/*
 * Deletes a virtual device from the metadata.  It _is_ safe to call this
 * when that device is open.  Operations on that device will just start
 * failing.  You still need to call close() on the device.
 */
int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
			       dm_thin_id dev);

/*
 * Commits _all_ metadata changes: device creation, deletion, mapping
 * updates.
 */
int dm_pool_commit_metadata(struct dm_pool_metadata *pmd);

/*
 * Discards all uncommitted changes.  Rereads the superblock, rolling back
 * to the last good transaction.  Thin devices remain open.
 * dm_thin_aborted_changes() tells you if they had uncommitted changes.
 *
 * If this call fails it's only useful to call dm_pool_metadata_close().
 * All other methods will fail with -EINVAL.
 */
int dm_pool_abort_metadata(struct dm_pool_metadata *pmd);
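
/*
 * Illustrative sketch only (not part of the upstream header): a hypothetical
 * helper showing the intended commit/abort pairing.  If the commit fails,
 * the caller rolls back to the last good transaction with
 * dm_pool_abort_metadata(); if even that fails, only
 * dm_pool_metadata_close() remains useful, as noted above.
 */
static inline int example_commit_or_abort(struct dm_pool_metadata *pmd)
{
	int r;

	r = dm_pool_commit_metadata(pmd);
	if (!r)
		return 0;

	/* Commit failed: discard uncommitted changes and report the error. */
	if (dm_pool_abort_metadata(pmd))
		pr_err("example: abort failed, metadata must be closed\n");

	return r;
}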

/*
 * Set/get userspace transaction id.
 */
int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t current_id,
					uint64_t new_id);

int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t *result);

/*
 * Hold/get root for userspace transaction.
 *
 * The metadata snapshot is a copy of the current superblock (minus the
 * space maps).  Userland can access the data structures for READ
 * operations only.  A small performance hit is incurred by providing this
 * copy of the metadata to userland due to extra copy-on-write operations
 * on the metadata nodes.  Release this as soon as you finish with it.
 */
int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd);
int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd);

int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
			      dm_block_t *result);
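
/*
 * Illustrative sketch only (not part of the upstream header): a hypothetical
 * helper showing the reserve/query/release pattern for the metadata
 * snapshot.  Real callers keep the snapshot held while userspace tools read
 * it; here it is released immediately just to show the pairing.
 */
static inline int example_metadata_snap_roundtrip(struct dm_pool_metadata *pmd)
{
	dm_block_t snap_root;
	int r;

	r = dm_pool_reserve_metadata_snap(pmd);
	if (r)
		return r;

	/* Report the held root block so userspace knows where to read. */
	r = dm_pool_get_metadata_snap(pmd, &snap_root);

	/* Release as soon as the snapshot is no longer needed. */
	dm_pool_release_metadata_snap(pmd);
	return r;
}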

/*
 * Actions on a single virtual device.
 */

/*
 * Opening the same device more than once will fail with -EBUSY.
 */
int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
			     struct dm_thin_device **td);

int dm_pool_close_thin_device(struct dm_thin_device *td);

dm_thin_id dm_thin_dev_id(struct dm_thin_device *td);

struct dm_thin_lookup_result {
	dm_block_t block;
	bool shared:1;
};

/*
 * Returns:
 *   -EWOULDBLOCK iff @can_issue_io is not set and the lookup would need to
 *    issue IO to read the metadata.
 *   -ENODATA iff that mapping is not present.
 *   0 success
 */
int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
		       int can_issue_io, struct dm_thin_lookup_result *result);
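
/*
 * Illustrative sketch only (not part of the upstream header): a hypothetical
 * lookup helper showing how the three return values are typically handled.
 * The caller chooses whether blocking metadata IO is acceptable via
 * @can_issue_io (e.g. false from a fast path, true from a worker thread).
 */
static inline int example_lookup(struct dm_thin_device *td, dm_block_t block,
				 bool can_issue_io)
{
	struct dm_thin_lookup_result lookup;
	int r;

	r = dm_thin_find_block(td, block, can_issue_io, &lookup);
	switch (r) {
	case 0:
		/*
		 * Mapped: lookup.block is the data block and lookup.shared
		 * says whether it is shared with a snapshot.
		 */
		return 0;
	case -ENODATA:
		/* Not provisioned yet: the caller may allocate and insert. */
		return r;
	case -EWOULDBLOCK:
		/* Metadata read needed: retry from a context that can issue IO. */
		return r;
	default:
		return r;
	}
}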

/*
 * Retrieve the next run of contiguously mapped blocks.  Useful for working
 * out where to break up IO.  Returns 0 on success, < 0 on error.
 */
int dm_thin_find_mapped_range(struct dm_thin_device *td,
			      dm_block_t begin, dm_block_t end,
			      dm_block_t *thin_begin, dm_block_t *thin_end,
			      dm_block_t *pool_begin, bool *maybe_shared);

/*
 * Obtain an unused block.
 */
int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result);

/*
 * Insert or remove block.
 */
int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
			 dm_block_t data_block);

int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);
int dm_thin_remove_range(struct dm_thin_device *td,
			 dm_block_t begin, dm_block_t end);
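
/*
 * Illustrative sketch only (not part of the upstream header): a hypothetical
 * helper combining the allocation and mapping calls above to provision one
 * virtual block.  A real caller batches many such mappings before a single
 * dm_pool_commit_metadata(); the commit here is only to show that mappings
 * are not durable until committed.
 */
static inline int example_provision_block(struct dm_pool_metadata *pmd,
					  struct dm_thin_device *td,
					  dm_block_t virt_block)
{
	dm_block_t data_block;
	int r;

	/* Grab an unused data block from the pool. */
	r = dm_pool_alloc_data_block(pmd, &data_block);
	if (r)
		return r;

	/* Map the virtual block to the freshly allocated data block. */
	r = dm_thin_insert_block(td, virt_block, data_block);
	if (r)
		return r;

	/* Make the new mapping durable. */
	return dm_pool_commit_metadata(pmd);
}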

/*
 * Queries.
 */
bool dm_thin_changed_this_transaction(struct dm_thin_device *td);

bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd);

bool dm_thin_aborted_changes(struct dm_thin_device *td);

int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
				     dm_block_t *highest_mapped);

int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result);

int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd,
				 dm_block_t *result);

int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
					  dm_block_t *result);

int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
				  dm_block_t *result);

int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);

int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);

int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);

/*
 * Returns -ENOSPC if the new size is too small and already allocated
 * blocks would be lost.
 */
int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);

/*
 * Flicks the underlying block manager into read only mode, so you know
 * that nothing is changing.
 */
void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd);

int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
					dm_block_t threshold,
					dm_sm_threshold_fn fn,
					void *context);

/*
 * Updates the superblock immediately.
 */
int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd);
bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd);

/*
 * Issue any prefetches that may be useful.
 */
void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd);

/* Pre-commit callback */
typedef int (*dm_pool_pre_commit_fn)(void *context);

void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd,
					  dm_pool_pre_commit_fn fn,
					  void *context);
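
/*
 * Illustrative sketch only (not part of the upstream header): a hypothetical
 * pre-commit callback and its registration.  The callback runs before each
 * metadata commit; returning a negative errno fails the commit, which is
 * how the dm-thin pool target uses it (e.g. to flush the data device before
 * the metadata becomes durable).
 */
static inline int example_pre_commit(void *context)
{
	/* e.g. flush outstanding data IO before the metadata commit. */
	return 0;
}

static inline void example_register_pre_commit(struct dm_pool_metadata *pmd,
					       void *context)
{
	dm_pool_register_pre_commit_callback(pmd, example_pre_commit, context);
}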

/*----------------------------------------------------------------*/

#endif