^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright (C) 2012 Red Hat, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * This file is released under the GPL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #ifndef DM_CACHE_METADATA_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #define DM_CACHE_METADATA_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include "dm-cache-block-types.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include "dm-cache-policy-internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include "persistent-data/dm-space-map-metadata.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #define DM_CACHE_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) /* FIXME: remove this restriction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * The metadata device is currently limited in size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #define DM_CACHE_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * A metadata device larger than 16GB triggers a warning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define DM_CACHE_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * Ext[234]-style compat feature flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * A new feature which old metadata will still be compatible with should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * define a DM_CACHE_FEATURE_COMPAT_* flag (rarely useful).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * A new feature that is not compatible with old code should define a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * DM_CACHE_FEATURE_INCOMPAT_* flag and guard the relevant code with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * that flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * A new feature that is not compatible with old code accessing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * metadata RDWR should define a DM_CACHE_FEATURE_RO_COMPAT_* flag and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * guard the relevant code with that flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * As these various flags are defined they should be added to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * following masks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define DM_CACHE_FEATURE_COMPAT_SUPP 0UL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define DM_CACHE_FEATURE_COMPAT_RO_SUPP 0UL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define DM_CACHE_FEATURE_INCOMPAT_SUPP 0UL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) struct dm_cache_metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) * Reopens or creates a new, empty metadata volume. Returns an ERR_PTR on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) * failure. If reopening then features must match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) sector_t data_block_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) bool may_format_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) size_t policy_hint_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) unsigned metadata_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) * The metadata needs to know how many cache blocks there are. We don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * care about the origin, assuming the core target is giving us valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * origin blocks to map to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) sector_t discard_block_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) dm_dblock_t new_nr_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)  * Callback used by dm_cache_load_discards() to report each discard-bitset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)  * entry to the caller.  NOTE(review): presumably a non-zero return aborts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)  * the walk and is propagated as the load's error code — confirm against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)  * dm-cache-metadata.c.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) dm_dblock_t dblock, bool discarded);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) int dm_cache_load_discards(struct dm_cache_metadata *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) load_discard_fn fn, void *context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)  * Callback used by dm_cache_load_mappings() to hand each stored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)  * oblock -> cblock mapping (plus its dirty flag and optional policy hint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)  * back to the caller.  hint is only meaningful when hint_valid is true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)  * NOTE(review): presumably a non-zero return aborts the load — confirm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)  * against dm-cache-metadata.c.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) dm_cblock_t cblock, bool dirty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) uint32_t hint, bool hint_valid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) struct dm_cache_policy *policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) load_mapping_fn fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) void *context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) unsigned nr_bits, unsigned long *bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)  * Hit/miss counters persisted in the metadata; read and written via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)  * dm_cache_metadata_get_stats() / dm_cache_metadata_set_stats() below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) struct dm_cache_statistics {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) uint32_t read_hits; /* reads satisfied by the cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) uint32_t read_misses; /* reads that missed the cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) uint32_t write_hits; /* writes that hit the cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) uint32_t write_misses; /* writes that missed the cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) struct dm_cache_statistics *stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * 'void' because it's no big deal if it fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) struct dm_cache_statistics *stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) dm_block_t *result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) dm_block_t *result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) void dm_cache_dump(struct dm_cache_metadata *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) * The policy is invited to save a 32bit hint value for every cblock (eg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) * for a hit count). These are stored against the policy name. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) * policies are changed, then hints will be lost. If the machine crashes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) * hints will be lost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) * The hints are indexed by the cblock, but many policies will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) * necessarily have a fast way of accessing them efficiently via cblock. So
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) * rather than querying the policy for each cblock, we let it walk its data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) * structures and fill in the hints in whatever order it wishes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) * Query method. Are all the blocks in the cache clean?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) int dm_cache_metadata_abort(struct dm_cache_metadata *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) #endif /* DM_CACHE_METADATA_H */