/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-space-map-common.h"
#include "dm-transaction-manager.h"

#include <linux/bitops.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "space map common"

/*----------------------------------------------------------------*/

/*
 * Index validator.
 */
#define INDEX_CSUM_XOR 160478

static void index_prepare_for_write(struct dm_block_validator *v,
				    struct dm_block *b,
				    size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);

	mi_le->blocknr = cpu_to_le64(dm_block_location(b));
	mi_le->csum = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
						 block_size - sizeof(__le32),
						 INDEX_CSUM_XOR));
}

static int index_check(struct dm_block_validator *v,
		       struct dm_block *b,
		       size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) {
		DMERR_LIMIT("index_check failed: blocknr %llu != wanted %llu",
			    le64_to_cpu(mi_le->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
					       block_size - sizeof(__le32),
					       INDEX_CSUM_XOR));
	if (csum_disk != mi_le->csum) {
		DMERR_LIMIT("index_check failed: csum %u != wanted %u",
			    le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator index_validator = {
	.name = "index",
	.prepare_for_write = index_prepare_for_write,
	.check = index_check
};
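
/*
 * The checksum deliberately skips only the csum field itself: it covers
 * block_size - sizeof(__le32) bytes starting at the field that follows it
 * (csum leads struct disk_metadata_index in dm-space-map-common.h), so the
 * stored blocknr is protected as well.  The bitmap validator below follows
 * the same pattern with a different XOR seed.
 */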

/*----------------------------------------------------------------*/

/*
 * Bitmap validator
 */
#define BITMAP_CSUM_XOR 240779

static void dm_bitmap_prepare_for_write(struct dm_block_validator *v,
					struct dm_block *b,
					size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);

	disk_header->blocknr = cpu_to_le64(dm_block_location(b));
	disk_header->csum = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
						       block_size - sizeof(__le32),
						       BITMAP_CSUM_XOR));
}

static int dm_bitmap_check(struct dm_block_validator *v,
			   struct dm_block *b,
			   size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
		DMERR_LIMIT("bitmap check failed: blocknr %llu != wanted %llu",
			    le64_to_cpu(disk_header->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
					       block_size - sizeof(__le32),
					       BITMAP_CSUM_XOR));
	if (csum_disk != disk_header->csum) {
		DMERR_LIMIT("bitmap check failed: csum %u != wanted %u",
			    le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator dm_sm_bitmap_validator = {
	.name = "sm_bitmap",
	.prepare_for_write = dm_bitmap_prepare_for_write,
	.check = dm_bitmap_check,
};

/*----------------------------------------------------------------*/

#define ENTRIES_PER_WORD 32
#define ENTRIES_SHIFT	5

static void *dm_bitmap_data(struct dm_block *b)
{
	return dm_block_data(b) + sizeof(struct disk_bitmap_header);
}

#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL

static unsigned int dm_bitmap_word_used(void *addr, unsigned int b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	uint64_t bits = le64_to_cpu(*w_le);
	uint64_t mask = (bits + WORD_MASK_HIGH + 1) & WORD_MASK_HIGH;

	return !(~bits & mask);
}
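
/*
 * dm_bitmap_word_used() is the 2-bit analogue of the classic "does this word
 * contain a zero byte?" trick: adding WORD_MASK_HIGH + 1 is the same as
 * subtracting 0x5555555555555555ULL (one from every 2-bit entry), and mask
 * keeps the high bit of each entry after that subtraction.  ~bits & mask is
 * then non-zero exactly when some entry is 00 (a free block), so the word is
 * reported as fully used only when no entry in it is free.
 */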

static unsigned int sm_lookup_bitmap(void *addr, unsigned int b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
	unsigned int hi, lo;

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;
	hi = !!test_bit_le(b, (void *) w_le);
	lo = !!test_bit_le(b + 1, (void *) w_le);
	return (hi << 1) | lo;
}

static void sm_set_bitmap(void *addr, unsigned int b, unsigned int val)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;

	if (val & 2)
		__set_bit_le(b, (void *) w_le);
	else
		__clear_bit_le(b, (void *) w_le);

	if (val & 1)
		__set_bit_le(b + 1, (void *) w_le);
	else
		__clear_bit_le(b + 1, (void *) w_le);
}
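
/*
 * Reference counts are packed four to a byte: entry b occupies little-endian
 * bit positions 2b and 2b+1 of the bitmap area.  Counts 0-2 are stored
 * directly (0 meaning the block is free); the value 3 is a sentinel meaning
 * "3 or more", with the exact count kept in the ref_count btree keyed by
 * block number (see sm_ll_lookup() and sm_ll_mutate() below).
 */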

static int sm_find_free(void *addr, unsigned int begin, unsigned int end,
			unsigned int *result)
{
	while (begin < end) {
		if (!(begin & (ENTRIES_PER_WORD - 1)) &&
		    dm_bitmap_word_used(addr, begin)) {
			begin += ENTRIES_PER_WORD;
			continue;
		}

		if (!sm_lookup_bitmap(addr, begin)) {
			*result = begin;
			return 0;
		}

		begin++;
	}

	return -ENOSPC;
}

/*----------------------------------------------------------------*/

static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	memset(ll, 0, sizeof(struct ll_disk));

	ll->tm = tm;

	ll->bitmap_info.tm = tm;
	ll->bitmap_info.levels = 1;

	/*
	 * Because the new bitmap blocks are created via a shadow
	 * operation, the old entry has already had its reference count
	 * decremented and we don't need the btree to do any bookkeeping.
	 */
	ll->bitmap_info.value_type.size = sizeof(struct disk_index_entry);
	ll->bitmap_info.value_type.inc = NULL;
	ll->bitmap_info.value_type.dec = NULL;
	ll->bitmap_info.value_type.equal = NULL;

	ll->ref_count_info.tm = tm;
	ll->ref_count_info.levels = 1;
	ll->ref_count_info.value_type.size = sizeof(uint32_t);
	ll->ref_count_info.value_type.inc = NULL;
	ll->ref_count_info.value_type.dec = NULL;
	ll->ref_count_info.value_type.equal = NULL;

	ll->block_size = dm_bm_block_size(dm_tm_get_bm(tm));

	if (ll->block_size > (1 << 30)) {
		DMERR("block size too big to hold bitmaps");
		return -EINVAL;
	}

	ll->entries_per_block = (ll->block_size - sizeof(struct disk_bitmap_header)) *
				ENTRIES_PER_BYTE;
	ll->nr_blocks = 0;
	ll->bitmap_root = 0;
	ll->ref_count_root = 0;
	ll->bitmap_index_changed = false;

	return 0;
}
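
/*
 * Sizing sketch (assuming 4KiB metadata blocks and the 16-byte
 * struct disk_bitmap_header): each bitmap block then holds
 * (4096 - 16) * ENTRIES_PER_BYTE = 16320 two-bit entries, i.e. one
 * bitmap block tracks the reference counts of 16320 blocks.
 */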

int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
{
	int r;
	dm_block_t i, nr_blocks, nr_indexes;
	unsigned int old_blocks, blocks;

	nr_blocks = ll->nr_blocks + extra_blocks;
	old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block);
	blocks = dm_sector_div_up(nr_blocks, ll->entries_per_block);

	nr_indexes = dm_sector_div_up(nr_blocks, ll->entries_per_block);
	if (nr_indexes > ll->max_entries(ll)) {
		DMERR("space map too large");
		return -EINVAL;
	}

	/*
	 * We need to set this before the dm_tm_new_block() call below.
	 */
	ll->nr_blocks = nr_blocks;
	for (i = old_blocks; i < blocks; i++) {
		struct dm_block *b;
		struct disk_index_entry idx;

		r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
		if (r < 0)
			return r;

		idx.blocknr = cpu_to_le64(dm_block_location(b));

		dm_tm_unlock(ll->tm, b);

		idx.nr_free = cpu_to_le32(ll->entries_per_block);
		idx.none_free_before = 0;

		r = ll->save_ie(ll, i, &idx);
		if (r < 0)
			return r;
	}

	return 0;
}

int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	struct dm_block *blk;

	if (b >= ll->nr_blocks) {
		DMERR_LIMIT("metadata block out of bounds");
		return -EINVAL;
	}

	b = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
			    &dm_sm_bitmap_validator, &blk);
	if (r < 0)
		return r;

	*result = sm_lookup_bitmap(dm_bitmap_data(blk), b);

	dm_tm_unlock(ll->tm, blk);

	return 0;
}
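
/*
 * Note that do_div() divides in place: after do_div(index, entries_per_block)
 * in sm_ll_lookup_bitmap() above, 'index' holds the bitmap number and the
 * returned remainder is the entry offset within that bitmap.  E.g. with, say,
 * 16320 entries per bitmap, block 20000 lands in bitmap 1 at entry 3680.
 */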

static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
				      uint32_t *result)
{
	__le32 le_rc;
	int r;

	r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
	if (r < 0)
		return r;

	*result = le32_to_cpu(le_rc);

	return r;
}

int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r = sm_ll_lookup_bitmap(ll, b, result);

	if (r)
		return r;

	if (*result != 3)
		return r;

	return sm_ll_lookup_big_ref_count(ll, b, result);
}

int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
			  dm_block_t end, dm_block_t *result)
{
	int r;
	struct disk_index_entry ie_disk;
	dm_block_t i, index_begin = begin;
	dm_block_t index_end = dm_sector_div_up(end, ll->entries_per_block);

	/*
	 * FIXME: Use shifts
	 */
	begin = do_div(index_begin, ll->entries_per_block);
	end = do_div(end, ll->entries_per_block);
	if (end == 0)
		end = ll->entries_per_block;

	for (i = index_begin; i < index_end; i++, begin = 0) {
		struct dm_block *blk;
		unsigned int position;
		uint32_t bit_end;

		r = ll->load_ie(ll, i, &ie_disk);
		if (r < 0)
			return r;

		if (le32_to_cpu(ie_disk.nr_free) == 0)
			continue;

		r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
				    &dm_sm_bitmap_validator, &blk);
		if (r < 0)
			return r;

		bit_end = (i == index_end - 1) ? end : ll->entries_per_block;

		r = sm_find_free(dm_bitmap_data(blk),
				 max_t(unsigned int, begin, le32_to_cpu(ie_disk.none_free_before)),
				 bit_end, &position);
		if (r == -ENOSPC) {
			/*
			 * This might happen because we started searching
			 * part way through the bitmap.
			 */
			dm_tm_unlock(ll->tm, blk);
			continue;
		}

		dm_tm_unlock(ll->tm, blk);

		*result = i * ll->entries_per_block + (dm_block_t) position;
		return 0;
	}

	return -ENOSPC;
}

int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
				 dm_block_t begin, dm_block_t end, dm_block_t *b)
{
	int r;
	uint32_t count;

	do {
		r = sm_ll_find_free_block(new_ll, begin, new_ll->nr_blocks, b);
		if (r)
			break;

		/* double check this block wasn't used in the old transaction */
		if (*b >= old_ll->nr_blocks)
			count = 0;
		else {
			r = sm_ll_lookup(old_ll, *b, &count);
			if (r)
				break;

			if (count)
				begin = *b + 1;
		}
	} while (count);

	return r;
}

static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
			int (*mutator)(void *context, uint32_t old, uint32_t *new),
			void *context, enum allocation_event *ev)
{
	int r;
	uint32_t bit, old, ref_count;
	struct dm_block *nb;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	void *bm_le;
	int inc;

	bit = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ie_disk.blocknr),
			       &dm_sm_bitmap_validator, &nb, &inc);
	if (r < 0) {
		DMERR("dm_tm_shadow_block() failed");
		return r;
	}
	ie_disk.blocknr = cpu_to_le64(dm_block_location(nb));

	bm_le = dm_bitmap_data(nb);
	old = sm_lookup_bitmap(bm_le, bit);

	if (old > 2) {
		r = sm_ll_lookup_big_ref_count(ll, b, &old);
		if (r < 0) {
			dm_tm_unlock(ll->tm, nb);
			return r;
		}
	}

	r = mutator(context, old, &ref_count);
	if (r) {
		dm_tm_unlock(ll->tm, nb);
		return r;
	}

	if (ref_count <= 2) {
		sm_set_bitmap(bm_le, bit, ref_count);

		dm_tm_unlock(ll->tm, nb);

		if (old > 2) {
			r = dm_btree_remove(&ll->ref_count_info,
					    ll->ref_count_root,
					    &b, &ll->ref_count_root);
			if (r)
				return r;
		}

	} else {
		__le32 le_rc = cpu_to_le32(ref_count);

		sm_set_bitmap(bm_le, bit, 3);
		dm_tm_unlock(ll->tm, nb);

		__dm_bless_for_disk(&le_rc);
		r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
				    &b, &le_rc, &ll->ref_count_root);
		if (r < 0) {
			DMERR("ref count insert failed");
			return r;
		}
	}

	if (ref_count && !old) {
		*ev = SM_ALLOC;
		ll->nr_allocated++;
		le32_add_cpu(&ie_disk.nr_free, -1);
		if (le32_to_cpu(ie_disk.none_free_before) == bit)
			ie_disk.none_free_before = cpu_to_le32(bit + 1);

	} else if (old && !ref_count) {
		*ev = SM_FREE;
		ll->nr_allocated--;
		le32_add_cpu(&ie_disk.nr_free, 1);
		ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
	} else
		*ev = SM_NONE;

	return ll->save_ie(ll, index, &ie_disk);
}
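
/*
 * The insert/inc/dec operations below all funnel through sm_ll_mutate():
 * shadow the bitmap block, fetch the old count (chasing the sentinel value 3
 * into the ref_count btree when needed), let the mutator compute the new
 * count, store it back in either the 2-bit bitmap entry or the btree, and
 * finally update nr_free/none_free_before in the index entry.
 */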

static int set_ref_count(void *context, uint32_t old, uint32_t *new)
{
	*new = *((uint32_t *) context);
	return 0;
}

int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
		 uint32_t ref_count, enum allocation_event *ev)
{
	return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
}

static int inc_ref_count(void *context, uint32_t old, uint32_t *new)
{
	*new = old + 1;
	return 0;
}

int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
{
	return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
}

static int dec_ref_count(void *context, uint32_t old, uint32_t *new)
{
	if (!old) {
		DMERR_LIMIT("unable to decrement a reference count below 0");
		return -EINVAL;
	}

	*new = old - 1;
	return 0;
}

int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
{
	return sm_ll_mutate(ll, b, dec_ref_count, NULL, ev);
}

int sm_ll_commit(struct ll_disk *ll)
{
	int r = 0;

	if (ll->bitmap_index_changed) {
		r = ll->commit(ll);
		if (!r)
			ll->bitmap_index_changed = false;
	}

	return r;
}

/*----------------------------------------------------------------*/

static int metadata_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	memcpy(ie, ll->mi_le.index + index, sizeof(*ie));
	return 0;
}

static int metadata_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	ll->bitmap_index_changed = true;
	memcpy(ll->mi_le.index + index, ie, sizeof(*ie));
	return 0;
}

static int metadata_ll_init_index(struct ll_disk *ll)
{
	int r;
	struct dm_block *b;

	r = dm_tm_new_block(ll->tm, &index_validator, &b);
	if (r < 0)
		return r;

	ll->bitmap_root = dm_block_location(b);

	dm_tm_unlock(ll->tm, b);

	return 0;
}

static int metadata_ll_open(struct ll_disk *ll)
{
	int r;
	struct dm_block *block;

	r = dm_tm_read_lock(ll->tm, ll->bitmap_root,
			    &index_validator, &block);
	if (r)
		return r;

	memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le));
	dm_tm_unlock(ll->tm, block);

	return 0;
}

static dm_block_t metadata_ll_max_entries(struct ll_disk *ll)
{
	return MAX_METADATA_BITMAPS;
}

static int metadata_ll_commit(struct ll_disk *ll)
{
	int r, inc;
	struct dm_block *b;

	r = dm_tm_shadow_block(ll->tm, ll->bitmap_root, &index_validator, &b, &inc);
	if (r)
		return r;

	memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
	ll->bitmap_root = dm_block_location(b);

	dm_tm_unlock(ll->tm, b);

	return 0;
}

int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	int r;

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = metadata_ll_load_ie;
	ll->save_ie = metadata_ll_save_ie;
	ll->init_index = metadata_ll_init_index;
	ll->open_index = metadata_ll_open;
	ll->max_entries = metadata_ll_max_entries;
	ll->commit = metadata_ll_commit;

	ll->nr_blocks = 0;
	ll->nr_allocated = 0;

	r = ll->init_index(ll);
	if (r < 0)
		return r;

	r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
	if (r < 0)
		return r;

	return 0;
}

int sm_ll_open_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm,
			void *root_le, size_t len)
{
	int r;
	struct disk_sm_root smr;

	if (len < sizeof(struct disk_sm_root)) {
		DMERR("sm_metadata root too small");
		return -ENOMEM;
	}

	/*
	 * We don't know the alignment of the root_le buffer, so we need to
	 * copy it into a properly aligned local structure before reading it.
	 */
	memcpy(&smr, root_le, sizeof(smr));

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = metadata_ll_load_ie;
	ll->save_ie = metadata_ll_save_ie;
	ll->init_index = metadata_ll_init_index;
	ll->open_index = metadata_ll_open;
	ll->max_entries = metadata_ll_max_entries;
	ll->commit = metadata_ll_commit;

	ll->nr_blocks = le64_to_cpu(smr.nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr.nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr.bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr.ref_count_root);

	return ll->open_index(ll);
}

/*----------------------------------------------------------------*/

static int disk_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	return dm_btree_lookup(&ll->bitmap_info, ll->bitmap_root, &index, ie);
}

static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	__dm_bless_for_disk(ie);
	return dm_btree_insert(&ll->bitmap_info, ll->bitmap_root,
			       &index, ie, &ll->bitmap_root);
}

static int disk_ll_init_index(struct ll_disk *ll)
{
	return dm_btree_empty(&ll->bitmap_info, &ll->bitmap_root);
}

static int disk_ll_open(struct ll_disk *ll)
{
	/* nothing to do */
	return 0;
}

static dm_block_t disk_ll_max_entries(struct ll_disk *ll)
{
	return -1ULL;
}

static int disk_ll_commit(struct ll_disk *ll)
{
	return 0;
}

int sm_ll_new_disk(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	int r;

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	ll->nr_blocks = 0;
	ll->nr_allocated = 0;

	r = ll->init_index(ll);
	if (r < 0)
		return r;

	r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
	if (r < 0)
		return r;

	return 0;
}

int sm_ll_open_disk(struct ll_disk *ll, struct dm_transaction_manager *tm,
		    void *root_le, size_t len)
{
	int r;
	struct disk_sm_root *smr = root_le;

	if (len < sizeof(struct disk_sm_root)) {
		DMERR("sm_metadata root too small");
		return -ENOMEM;
	}

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr->ref_count_root);

	return ll->open_index(ll);
}

/*----------------------------------------------------------------*/