^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright (C) 2011 Red Hat, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * This file is released under the GPL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #ifndef _LINUX_DM_BLOCK_MANAGER_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #define _LINUX_DM_BLOCK_MANAGER_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * Block number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) typedef uint64_t dm_block_t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) struct dm_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) dm_block_t dm_block_location(struct dm_block *b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) void *dm_block_data(struct dm_block *b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * @name should be a unique identifier for the block manager, no longer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * than 32 chars.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * @max_held_per_thread should be the maximum number of locks, read or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * write, that an individual thread holds at any one time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) struct dm_block_manager;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) struct dm_block_manager *dm_block_manager_create(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) struct block_device *bdev, unsigned block_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) unsigned max_held_per_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) void dm_block_manager_destroy(struct dm_block_manager *bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) unsigned dm_bm_block_size(struct dm_block_manager *bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * The validator allows the caller to verify newly-read data and modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * the data just before writing, e.g. to calculate checksums. It's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * important to be consistent with your use of validators. The only time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * you can change validators is if you call dm_bm_write_lock_zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) struct dm_block_validator {
/* Identifier for this validator — presumably used in error reporting; confirm in dm-block-manager.c. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) const char *name;
/*
 * Called on a write-locked block just before it is written out; may
 * modify the block's data in place, e.g. to stamp in a checksum
 * (see the "modify the data just before writing" note above).
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) void (*prepare_for_write)(struct dm_block_validator *v, struct dm_block *b, size_t block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) * Return 0 if the checksum is valid or < 0 on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) int (*check)(struct dm_block_validator *v, struct dm_block *b, size_t block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) * You can have multiple concurrent readers or a single writer holding a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * block lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) * dm_bm_lock() locks a block and returns through @result a pointer to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * memory that holds a copy of that block. If you have write-locked the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * block then any changes you make to memory pointed to by @result will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * written back to the disk sometime after dm_bm_unlock is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) struct dm_block_validator *v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) struct dm_block **result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) int dm_bm_write_lock(struct dm_block_manager *bm, dm_block_t b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) struct dm_block_validator *v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) struct dm_block **result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * The *_try_lock variants return -EWOULDBLOCK if the block isn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * available immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) int dm_bm_read_try_lock(struct dm_block_manager *bm, dm_block_t b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) struct dm_block_validator *v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) struct dm_block **result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * Use dm_bm_write_lock_zero() when you know you're going to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * overwrite the block completely. It saves a disk read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) int dm_bm_write_lock_zero(struct dm_block_manager *bm, dm_block_t b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) struct dm_block_validator *v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) struct dm_block **result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) void dm_bm_unlock(struct dm_block *b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * It's a common idiom to have a superblock that should be committed last.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) * @superblock should be write-locked on entry. It will be unlocked during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) * this function. All dirty blocks are guaranteed to be written and flushed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) * before the superblock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * This method always blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) int dm_bm_flush(struct dm_block_manager *bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) * Request data is prefetched into the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * Switches the bm to a read only mode. Once read-only mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) * has been entered the following functions will return -EPERM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) * dm_bm_write_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) * dm_bm_write_lock_zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) * dm_bm_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) * NOTE(review): dm_bm_unlock_move is not declared in this header — this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) * advice (avoid it in read-only mode; no error is returned) looks stale; confirm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) bool dm_bm_is_read_only(struct dm_block_manager *bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) void dm_bm_set_read_only(struct dm_block_manager *bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) void dm_bm_set_read_write(struct dm_block_manager *bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) #endif /* _LINUX_DM_BLOCK_MANAGER_H */