/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_SECTORS_PER_BITMAP_BIT	32768
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	131072
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16
#define RECALC_SECTORS			8192
#define RECALC_WRITE_SUPER		16
#define BITMAP_BLOCK_SIZE		4096	/* don't change it */
#define BITMAP_FLUSH_INTERVAL		(10 * HZ)
#define DISCARD_FILLER			0xf6

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC			"integrt"
#define SB_VERSION_1			1
#define SB_VERSION_2			2
#define SB_VERSION_3			3
#define SB_VERSION_4			4
#define SB_SECTORS			8
#define MAX_SECTORS_PER_BLOCK		8

struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__u16 integrity_tag_size;
	__u32 journal_sections;
	__u64 provided_data_sectors;	/* userspace uses this value */
	__u32 flags;
	__u8 log2_sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;
	__u8 pad[2];
	__u64 recalc_sector;
};

#define SB_FLAG_HAVE_JOURNAL_MAC	0x1
#define SB_FLAG_RECALCULATING		0x2
#define SB_FLAG_DIRTY_BITMAP		0x4
#define SB_FLAG_FIXED_PADDING		0x8

#define JOURNAL_ENTRY_ROUNDUP		8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR		8

struct journal_entry {
	union {
		struct {
			__u32 sector_lo;
			__u32 sector_hi;
		} s;
		__u64 sector;
	} u;
	commit_id_t last_bytes[];
	/* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)		((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)		do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x)		do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je)		((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)		do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)		((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)	do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)

#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS	8

#define N_COMMIT_IDS			4

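/*
 * Journal commit IDs rotate through N_COMMIT_IDS values; these helpers move
 * one step backward/forward in that cycle.
 */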
static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}

/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned key_size;
};

struct dm_integrity_c {
	struct dm_dev *dev;
	struct dm_dev *meta_dev;
	unsigned tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	unsigned n_bitmap_blocks;

	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;
	struct page_list *recalc_bitmap;
	struct page_list *may_write_bitmap;
	struct bitmap_block_status *bbs;
	unsigned bitmap_flush_interval;
	int synchronous_mode;
	struct bio_list synchronous_bios;
	struct delayed_work bitmap_flush_work;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t data_device_sectors;
	sector_t meta_device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;

	unsigned char mode;

	int failed;

	struct crypto_shash *internal_hash;

	struct dm_target *ti;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	struct list_head wait_list;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;
	struct workqueue_struct *offload_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned committed_section;
	unsigned n_committed_sections;

	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;

	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;

	unsigned free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct workqueue_struct *recalc_wq;
	struct work_struct recalc_work;
	u8 *recalc_buffer;
	u8 *recalc_tags;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool journal_uptodate;
	bool just_formatted;
	bool recalculate_flag;
	bool discard;
	bool fix_padding;
	bool legacy_recalculate;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;

	atomic64_t number_of_mismatches;

	struct notifier_block reboot_notifier;
};

struct dm_integrity_range {
	sector_t logical_sector;
	sector_t n_sectors;
	bool waiting;
	union {
		struct rb_node node;
		struct {
			struct task_struct *task;
			struct list_head wait_entry;
		};
	};
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	enum req_opf op;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned metadata_offset;

	atomic_t in_flight;
	blk_status_t bi_status;

	struct completion *completion;

	struct dm_bio_details bio_details;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

struct bitmap_block_status {
	struct work_struct work;
	struct dm_integrity_c *ic;
	unsigned idx;
	unsigned long *bitmap;
	struct bio_list bio_queue;
	spinlock_t bio_queue_lock;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL	32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)	printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
	va_list args;
	va_start(args, msg);
	vprintk(msg, args);
	va_end(args);
	if (len)
		pr_cont(":");
	while (len) {
		pr_cont(" %02x", *bytes);
		bytes++;
		len--;
	}
	pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)	__DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
#endif

static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

/*
 * DM integrity profile; protection is performed by a layer above (dm-crypt).
 */
static const struct blk_integrity_profile dm_integrity_profile = {
	.name			= "DM-DIF-EXT-TAG",
	.generate_fn		= NULL,
	.verify_fn		= NULL,
	.prepare_fn		= dm_integrity_prepare,
	.complete_fn		= dm_integrity_complete,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (err == -EILSEQ)
		atomic64_inc(&ic->number_of_mismatches);
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return READ_ONCE(ic->failed);
}

static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
{
	if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) &&
	    !ic->legacy_recalculate)
		return true;
	return false;
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
					  unsigned j, unsigned char seq)
{
	/*
	 * Xor the number with the section and sector, so that if a piece of
	 * journal is written in the wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

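/*
 * In the interleaved layout (no separate meta_dev) the data device is split
 * into areas of 2^log2_interleave_sectors data sectors, each with its own
 * metadata run in front of it; map a logical data sector to (area, offset
 * within area). With a separate metadata device there is a single area.
 */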
static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	if (!ic->meta_dev) {
		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
		*area = data_sector >> log2_interleave_sectors;
		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
	} else {
		*area = 0;
		*offset = data_sector;
	}
}

#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)

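/*
 * Return the number of the metadata (bufio) block that contains the tag for
 * the block at (area, offset) and store the tag's byte offset within that
 * block in *metadata_offset. "offset" is taken in sectors and converted to
 * blocks with sector_to_block().
 */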
static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
{
	__u64 ms;
	unsigned mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}

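/*
 * Map (area, offset within area) back to the physical sector on the data
 * device, accounting for the superblock/initial sectors and the metadata
 * runs that precede the area's data.
 */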
static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	if (ic->meta_dev)
		return offset;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	result += ic->start;

	return result;
}

static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

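/*
 * Set the superblock version to the lowest one that supports the features
 * currently enabled.
 */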
static void sb_set_version(struct dm_integrity_c *ic)
{
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
		ic->sb->version = SB_VERSION_4;
	else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
		ic->sb->version = SB_VERSION_3;
	else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
		ic->sb->version = SB_VERSION_2;
	else
		ic->sb->version = SB_VERSION_1;
}

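/* Synchronously read or write the superblock via dm-io. */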
static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	if (op == REQ_OP_WRITE)
		sb_set_version(ic);

	return dm_io(&io_req, 1, &io_loc, NULL);
}

#define BITMAP_OP_TEST_ALL_SET		0
#define BITMAP_OP_TEST_ALL_CLEAR	1
#define BITMAP_OP_SET			2
#define BITMAP_OP_CLEAR			3

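/*
 * Apply "mode" to the bitmap bits that cover sectors [sector, sector + n_sectors):
 * the TEST_ALL_* modes return false on the first bit that disagrees, SET and
 * CLEAR modify the bits in place. The bitmap spans multiple pages of a
 * page_list, so the range is processed one page at a time, with whole-long
 * (and, for CLEAR, whole-page) fast paths.
 */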
static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
			    sector_t sector, sector_t n_sectors, int mode)
{
	unsigned long bit, end_bit, this_end_bit, page, end_page;
	unsigned long *data;

	if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
		DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
			sector,
			n_sectors,
			ic->sb->log2_sectors_per_block,
			ic->log2_blocks_per_bitmap_bit,
			mode);
		BUG();
	}

	if (unlikely(!n_sectors))
		return true;

	bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	end_bit = (sector + n_sectors - 1) >>
		(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

	page = bit / (PAGE_SIZE * 8);
	bit %= PAGE_SIZE * 8;

	end_page = end_bit / (PAGE_SIZE * 8);
	end_bit %= PAGE_SIZE * 8;

repeat:
	if (page < end_page) {
		this_end_bit = PAGE_SIZE * 8 - 1;
	} else {
		this_end_bit = end_bit;
	}

	data = lowmem_page_address(bitmap[page].page);

	if (mode == BITMAP_OP_TEST_ALL_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != -1)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (!test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != 0)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = -1;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__set_bit(bit, data);
			bit++;
		}
	} else if (mode == BITMAP_OP_CLEAR) {
		if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
			clear_page(data);
		else while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = 0;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__clear_bit(bit, data);
			bit++;
		}
	} else {
		BUG();
	}

	if (unlikely(page < end_page)) {
		bit = 0;
		page++;
		goto repeat;
	}

	return true;
}

static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
	unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
	unsigned i;

	for (i = 0; i < n_bitmap_pages; i++) {
		unsigned long *dst_data = lowmem_page_address(dst[i].page);
		unsigned long *src_data = lowmem_page_address(src[i].page);
		copy_page(dst_data, src_data);
	}
}

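/* Map a data sector to the bitmap_block_status that tracks its bitmap block. */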
static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
	unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
	return &ic->bbs[bitmap_block];
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
		       function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

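/*
 * Translate (section, sector offset within the section) into a page index
 * and byte offset within the journal page_list.
 */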
static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
{
	unsigned sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
{
	unsigned pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}

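/*
 * Journal entries are striped across the JOURNAL_BLOCK_SECTORS sectors of a
 * section: entry n lives in sector n % JOURNAL_BLOCK_SECTORS at slot
 * n / JOURNAL_BLOCK_SECTORS.
 */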
static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	unsigned rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}

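/*
 * Compute the MAC over the sector numbers of all journal entries in a
 * section. The digest is zero-padded (or truncated) to JOURNAL_MAC_SIZE;
 * on error, the result is zeroed and the error is recorded on the target.
 */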
static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned j, size;

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);
		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[HASH_MAX_DIGESTSIZE];

		if (WARN_ON(size > sizeof(digest))) {
			dm_integrity_io_error(ic, "digest_size", -EINVAL);
			goto err;
		}
		r = crypto_shash_final(desc, digest);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}

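/*
 * Write (wr) or verify (!wr) the per-sector slices of the section MAC stored
 * in each journal sector's mac field.
 */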
static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;
	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}

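/*
 * Encrypt (journal -> journal_io) or decrypt (journal_io -> journal) a range
 * of journal sections by XORing each page with the corresponding page of
 * ic->journal_xor, offloaded through the async_tx XOR API. comp->in_flight
 * counts the submitted pages; complete_journal_op() fires the completion
 * when all of them have finished. When encrypting, the section MAC is
 * written into each section before its pages are XORed.
 */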
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) unsigned n_sections, struct journal_completion *comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct async_submit_ctl submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) unsigned pl_index, pl_offset, section_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) struct page_list *source_pl, *target_pl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (likely(encrypt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) source_pl = ic->journal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) target_pl = ic->journal_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) source_pl = ic->journal_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) target_pl = ic->journal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) page_list_location(ic, section, 0, &pl_index, &pl_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) section_index = pl_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) size_t this_step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct page *src_pages[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct page *dst_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) while (unlikely(pl_index == section_index)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) unsigned dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (likely(encrypt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) rw_section_mac(ic, section, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) section++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) n_sections--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (!n_sections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) page_list_location(ic, section, 0, §ion_index, &dummy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) dst_page = target_pl[pl_index].page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) src_pages[0] = source_pl[pl_index].page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) src_pages[1] = ic->journal_xor[pl_index].page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) pl_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) pl_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) n_bytes -= this_step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) } while (n_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) BUG_ON(n_sections);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) async_tx_issue_pending_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
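/*
 * skcipher callback: -EINPROGRESS means a request that previously
 * returned -EBUSY has now been accepted, so wake the waiter in do_crypt()
 * instead of completing the journal operation.
 */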
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) static void complete_journal_encrypt(struct crypto_async_request *req, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct journal_completion *comp = req->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (likely(err == -EINPROGRESS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) complete(&comp->ic->crypto_backoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) complete_journal_op(comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
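/*
 * Submit one skcipher request.  Returns true if the request remains in
 * flight (the callback will drop the extra reference the caller takes),
 * false if it completed synchronously or failed.  -EBUSY means the
 * request was backlogged: wait until the backlog is accepted, then treat
 * it as in flight.
 */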
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) complete_journal_encrypt, comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (likely(encrypt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) r = crypto_skcipher_encrypt(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) r = crypto_skcipher_decrypt(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (likely(!r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (likely(r == -EINPROGRESS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (likely(r == -EBUSY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) wait_for_completion(&comp->ic->crypto_backoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) reinit_completion(&comp->ic->crypto_backoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) dm_integrity_io_error(comp->ic, "encrypt", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
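/*
 * Encrypt or decrypt journal sections using one preallocated skcipher
 * request per section (ic->sk_requests).  The IV is restored from the
 * second half of the IV buffer, which appears to hold the initial IV
 * saved at journal creation.  comp->in_flight is biased by 2 so the
 * completion cannot fire while requests are still being submitted; the
 * bias is dropped by the atomic_dec() and complete_journal_op() pair at
 * the end.
 */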
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) unsigned n_sections, struct journal_completion *comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) struct scatterlist **source_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct scatterlist **target_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) atomic_add(2, &comp->in_flight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (likely(encrypt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) source_sg = ic->journal_scatterlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) target_sg = ic->journal_io_scatterlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) source_sg = ic->journal_io_scatterlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) target_sg = ic->journal_scatterlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct skcipher_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) unsigned ivsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) char *iv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (likely(encrypt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) rw_section_mac(ic, section, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) req = ic->sk_requests[section];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) iv = req->iv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) memcpy(iv, iv + ivsize, ivsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) req->src = source_sg[section];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) req->dst = target_sg[section];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (unlikely(do_crypt(encrypt, req, comp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) atomic_inc(&comp->in_flight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) section++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) n_sections--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) } while (n_sections);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) atomic_dec(&comp->in_flight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) complete_journal_op(comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) unsigned n_sections, struct journal_completion *comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (ic->journal_xor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return xor_journal(ic, encrypt, section, n_sections, comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return crypt_journal(ic, encrypt, section, n_sections, comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) static void complete_journal_io(unsigned long error, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct journal_completion *comp = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (unlikely(error != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) dm_integrity_io_error(comp->ic, "writing journal", -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) complete_journal_op(comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
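/*
 * Read or write a range of journal sectors with dm-io on the page list
 * backing the journal (ic->journal_io holds the encrypted form when
 * journal encryption is used).  The journal starts right after the
 * superblock, hence the SB_SECTORS offset.  Example of the sector-to-page
 * mapping with 4K pages: sector 20 gives pl_index = 20 >> 3 = 2 and
 * pl_offset = (20 << SECTOR_SHIFT) & (PAGE_SIZE - 1) = 2048.  A NULL comp
 * makes the call synchronous.
 */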
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) unsigned sector, unsigned n_sectors, struct journal_completion *comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct dm_io_request io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct dm_io_region io_loc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) unsigned pl_index, pl_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (unlikely(dm_integrity_failed(ic))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) complete_journal_io(-1UL, comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) io_req.bi_op = op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) io_req.bi_op_flags = op_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) io_req.mem.type = DM_IO_PAGE_LIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (ic->journal_io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) io_req.mem.ptr.pl = &ic->journal_io[pl_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) io_req.mem.ptr.pl = &ic->journal[pl_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) io_req.mem.offset = pl_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (likely(comp != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) io_req.notify.fn = complete_journal_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) io_req.notify.context = comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) io_req.notify.fn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) io_req.client = ic->io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) io_loc.sector = ic->start + SB_SECTORS + sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) io_loc.count = n_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) r = dm_io(&io_req, 1, &io_loc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (unlikely(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (comp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) complete_journal_io(-1UL, comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) unsigned n_sections, struct journal_completion *comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) unsigned sector, n_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) sector = section * ic->journal_section_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) n_sectors = n_sections * ic->journal_section_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
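/*
 * Write the committed sections to disk with FUA.  If the range wraps past
 * the end of the journal it is split in two; with journal encryption the
 * two halves are encrypted concurrently when possible.  io_comp.in_flight
 * starts at 2 in the wrap-around case because two rw_journal() calls are
 * issued.
 */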
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct journal_completion io_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct journal_completion crypt_comp_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct journal_completion crypt_comp_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) io_comp.ic = ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) init_completion(&io_comp.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (commit_start + commit_sections <= ic->journal_sections) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (ic->journal_io) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) crypt_comp_1.ic = ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) init_completion(&crypt_comp_1.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) wait_for_completion_io(&crypt_comp_1.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) for (i = 0; i < commit_sections; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) rw_section_mac(ic, commit_start + i, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) commit_sections, &io_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) unsigned to_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) to_end = ic->journal_sections - commit_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (ic->journal_io) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) crypt_comp_1.ic = ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) init_completion(&crypt_comp_1.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (try_wait_for_completion(&crypt_comp_1.comp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) reinit_completion(&crypt_comp_1.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) wait_for_completion_io(&crypt_comp_1.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) crypt_comp_2.ic = ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) init_completion(&crypt_comp_2.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) wait_for_completion_io(&crypt_comp_1.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) wait_for_completion_io(&crypt_comp_2.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) for (i = 0; i < to_end; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) rw_section_mac(ic, commit_start + i, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) for (i = 0; i < commit_sections - to_end; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) rw_section_mac(ic, i, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) wait_for_completion_io(&io_comp.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
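/*
 * Despite the name, this writes data *from* the in-memory journal to its
 * final location on the data device.  The JOURNAL_BLOCK_SECTORS term
 * skips the journal-entry block at the start of the section, so 'offset'
 * addresses data sectors only.  Errors are reported by calling
 * fn(-1UL, data), matching the dm-io notify convention.
 */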
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct dm_io_request io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) struct dm_io_region io_loc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) unsigned sector, pl_index, pl_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (unlikely(dm_integrity_failed(ic))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) fn(-1UL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) io_req.bi_op = REQ_OP_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) io_req.bi_op_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) io_req.mem.type = DM_IO_PAGE_LIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) io_req.mem.ptr.pl = &ic->journal[pl_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) io_req.mem.offset = pl_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) io_req.notify.fn = fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) io_req.notify.context = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) io_req.client = ic->io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) io_loc.bdev = ic->dev->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) io_loc.sector = target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) io_loc.count = n_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) r = dm_io(&io_req, 1, &io_loc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (unlikely(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) fn(-1UL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
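/*
 * In-flight range tracking.  Ranges are half-open intervals
 * [logical_sector, logical_sector + n_sectors); two ranges overlap iff
 * each starts before the other ends, e.g. [0,8) and [4,12) overlap while
 * [0,8) and [8,16) do not.
 */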
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) range1->logical_sector + range1->n_sectors > range2->logical_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct rb_node **n = &ic->in_progress.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct rb_node *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) if (likely(check_waiting)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) struct dm_integrity_range *range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) list_for_each_entry(range, &ic->wait_list, wait_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (unlikely(ranges_overlap(range, new_range)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) while (*n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) parent = *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) n = &range->node.rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) } else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) n = &range->node.rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) rb_link_node(&new_range->node, parent, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) rb_insert_color(&new_range->node, &ic->in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
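/*
 * Remove a range and retry the waiters in FIFO order.  The loop stops at
 * the first waiter that still conflicts and puts it back at the head of
 * the list, so later waiters cannot starve an earlier one.
 */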
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) rb_erase(&range->node, &ic->in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) while (unlikely(!list_empty(&ic->wait_list))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) struct dm_integrity_range *last_range =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) struct task_struct *last_range_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) last_range_task = last_range->task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) list_del(&last_range->wait_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (!add_new_range(ic, last_range, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) last_range->task = last_range_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) list_add(&last_range->wait_entry, &ic->wait_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) last_range->waiting = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) wake_up_process(last_range_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) spin_lock_irqsave(&ic->endio_wait.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) remove_range_unlocked(ic, range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) new_range->waiting = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) list_add_tail(&new_range->wait_entry, &ic->wait_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) new_range->task = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) __set_current_state(TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) io_schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) spin_lock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) } while (unlikely(new_range->waiting));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (unlikely(!add_new_range(ic, new_range, true)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) wait_and_add_new_range(ic, new_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
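/*
 * Journal node tree: each journal entry has a struct journal_node in
 * ic->journal_tree, and the nodes of currently valid entries are linked
 * into an rbtree keyed by logical sector.  Duplicate sectors are allowed;
 * a newer entry for the same sector is inserted to the right of older
 * ones.  An unused node has an empty rb node and sector == (sector_t)-1.
 */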
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) static void init_journal_node(struct journal_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) RB_CLEAR_NODE(&node->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) node->sector = (sector_t)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) struct rb_node **link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct rb_node *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) node->sector = sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) BUG_ON(!RB_EMPTY_NODE(&node->node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) link = &ic->journal_tree_root.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) while (*link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) struct journal_node *j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) parent = *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) j = container_of(parent, struct journal_node, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (sector < j->sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) link = &j->node.rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) link = &j->node.rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) rb_link_node(&node->node, parent, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) rb_insert_color(&node->node, &ic->journal_tree_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) BUG_ON(RB_EMPTY_NODE(&node->node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) rb_erase(&node->node, &ic->journal_tree_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) init_journal_node(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) #define NOT_FOUND (-1U)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
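/*
 * Find the newest journal entry for 'sector', returning its index in
 * ic->journal_tree or NOT_FOUND.  Since duplicates are inserted to the
 * right, the rightmost match is the most recent.  *next_sector is set to
 * the smallest sector in the tree greater than 'sector' (or (sector_t)-1
 * if there is none).
 */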
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) struct rb_node *n = ic->journal_tree_root.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) unsigned found = NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) *next_sector = (sector_t)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) while (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct journal_node *j = container_of(n, struct journal_node, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (sector == j->sector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) found = j - ic->journal_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (sector < j->sector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) *next_sector = j->sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) n = j->node.rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) n = j->node.rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) struct journal_node *node, *next_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) struct rb_node *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (unlikely(pos >= ic->journal_entries))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) node = &ic->journal_tree[pos];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (unlikely(RB_EMPTY_NODE(&node->node)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (unlikely(node->sector != sector))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) next = rb_next(&node->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (unlikely(!next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) next_node = container_of(next, struct journal_node, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) return next_node->sector != sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct rb_node *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct journal_node *next_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) unsigned next_section;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) BUG_ON(RB_EMPTY_NODE(&node->node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) next = rb_next(&node->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (unlikely(!next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) next_node = container_of(next, struct journal_node, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) if (next_node->sector != node->sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (next_section >= ic->committed_section &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) next_section < ic->committed_section + ic->n_committed_sections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) #define TAG_READ 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) #define TAG_WRITE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) #define TAG_CMP 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
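/*
 * Read, write or compare 'total_size' bytes of tag data in the metadata
 * area through dm-bufio, advancing *metadata_block and *metadata_offset.
 * For TAG_CMP on a device with discards enabled, a mismatch is tolerated
 * while the on-disk bytes could still all be DISCARD_FILLER; the thorough
 * test tracks, per tag, whether the bytes may be a matching hash and/or
 * filler, and returns the (positive) number of bytes not yet processed at
 * the first tag that can be neither.  Returns 0 on success or a negative
 * errno on I/O error.
 */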
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) unsigned *metadata_offset, unsigned total_size, int op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) #define MAY_BE_FILLER 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) #define MAY_BE_HASH 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) unsigned hash_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) unsigned char *data, *dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) struct dm_buffer *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) unsigned to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) r = dm_integrity_failed(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (unlikely(r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) data = dm_bufio_read(ic->bufio, *metadata_block, &b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (IS_ERR(data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return PTR_ERR(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) dp = data + *metadata_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (op == TAG_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) memcpy(tag, dp, to_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) } else if (op == TAG_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) memcpy(dp, tag, to_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) /* e.g.: op == TAG_CMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (likely(is_power_of_2(ic->tag_size))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (unlikely(memcmp(dp, tag, to_copy)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (unlikely(!ic->discard) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) goto thorough_test;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) unsigned i, ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) thorough_test:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) ts = total_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) for (i = 0; i < to_copy; i++, ts--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (unlikely(dp[i] != tag[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) may_be &= ~MAY_BE_HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (likely(dp[i] != DISCARD_FILLER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) may_be &= ~MAY_BE_FILLER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) hash_offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (unlikely(hash_offset == ic->tag_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (unlikely(!may_be)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) dm_bufio_release(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) return ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) hash_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) dm_bufio_release(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) tag += to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) *metadata_offset += to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) (*metadata_block)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) *metadata_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (unlikely(!is_power_of_2(ic->tag_size))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) hash_offset = (hash_offset + to_copy) % ic->tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) total_size -= to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) } while (unlikely(total_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) #undef MAY_BE_FILLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) #undef MAY_BE_HASH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct flush_request {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) struct dm_io_request io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) struct dm_io_region io_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) struct dm_integrity_c *ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) struct completion comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) static void flush_notify(unsigned long error, void *fr_)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) struct flush_request *fr = fr_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (unlikely(error != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) dm_integrity_io_error(fr->ic, "flusing disk cache", -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) complete(&fr->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
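/*
 * Write out dirty tag buffers, reporting failures as "writing tags".
 * When a separate metadata device is in use and flush_data is set, an
 * empty REQ_PREFLUSH bio is also issued to the data device in parallel
 * and waited for at the end; with a single device the data flush is
 * skipped.
 */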
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) struct flush_request fr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (!ic->meta_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) flush_data = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (flush_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) fr.io_req.bi_op = REQ_OP_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) fr.io_req.mem.type = DM_IO_KMEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) fr.io_req.mem.ptr.addr = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) fr.io_req.notify.fn = flush_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) fr.io_req.notify.context = &fr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) fr.io_reg.bdev = ic->dev->bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) fr.io_reg.sector = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) fr.io_reg.count = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) fr.ic = ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) init_completion(&fr.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) BUG_ON(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) r = dm_bufio_write_dirty_buffers(ic->bufio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (unlikely(r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) dm_integrity_io_error(ic, "writing tags", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (flush_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) wait_for_completion(&fr.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
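/*
 * Sleep until woken via ic->endio_wait.  Must be called with
 * ic->endio_wait.lock held; the lock is dropped around io_schedule() and
 * re-acquired before returning.
 */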
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) static void sleep_on_endio_wait(struct dm_integrity_c *ic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) DECLARE_WAITQUEUE(wait, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) __add_wait_queue(&ic->endio_wait, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) __set_current_state(TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) io_schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) spin_lock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) __remove_wait_queue(&ic->endio_wait, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) static void autocommit_fn(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (likely(!dm_integrity_failed(ic)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) queue_work(ic->commit_wq, &ic->commit_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) static void schedule_autocommit(struct dm_integrity_c *ic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (!timer_pending(&ic->autocommit_timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) spin_lock_irqsave(&ic->endio_wait.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) bio_list_add(&ic->flush_bio_list, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) queue_work(ic->commit_wq, &ic->commit_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
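/*
 * Complete a bio.  In synchronous (bitmap) mode, finished writes are
 * parked on ic->synchronous_bios and bitmap_flush_work is kicked, so the
 * bios are presumably only ended once the bitmap state is flushed.
 */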
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) int r = dm_integrity_failed(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) if (unlikely(r) && !bio->bi_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) bio->bi_status = errno_to_blk_status(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) spin_lock_irqsave(&ic->endio_wait.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) bio_list_add(&ic->synchronous_bios, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) bio_endio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) submit_flush_bio(ic, dio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) do_endio(ic, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) static void dec_in_flight(struct dm_integrity_io *dio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (atomic_dec_and_test(&dio->in_flight)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) struct dm_integrity_c *ic = dio->ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) remove_range(ic, &dio->range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) schedule_autocommit(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) if (unlikely(dio->bi_status) && !bio->bi_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) bio->bi_status = dio->bi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) dio->range.logical_sector += dio->range.n_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) INIT_WORK(&dio->work, integrity_bio_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) queue_work(ic->offload_wq, &dio->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) do_endio_flush(ic, dio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) static void integrity_end_io(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) dm_bio_restore(&dio->bio_details, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (bio->bi_integrity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) bio->bi_opf |= REQ_INTEGRITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (dio->completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) complete(dio->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) dec_in_flight(dio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
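/*
 * Compute the integrity tag of one block: ic->internal_hash over the
 * little-endian sector number followed by the block data, zero-padded to
 * tag_size when the digest is shorter.  If the hash unexpectedly fails,
 * the result is filled with random bytes so that any later comparison
 * fails closed instead of matching stale data.
 */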
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) const char *data, char *result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) __u64 sector_le = cpu_to_le64(sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) SHASH_DESC_ON_STACK(req, ic->internal_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) unsigned digest_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) req->tfm = ic->internal_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) r = crypto_shash_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (unlikely(r < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) dm_integrity_io_error(ic, "crypto_shash_init", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) r = crypto_shash_update(req, (const __u8 *)§or_le, sizeof sector_le);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (unlikely(r < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) dm_integrity_io_error(ic, "crypto_shash_update", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (unlikely(r < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) dm_integrity_io_error(ic, "crypto_shash_update", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) r = crypto_shash_final(req, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) if (unlikely(r < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) dm_integrity_io_error(ic, "crypto_shash_final", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) digest_size = crypto_shash_digestsize(ic->internal_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (unlikely(digest_size < ic->tag_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) memset(result + digest_size, 0, ic->tag_size - digest_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) /* this shouldn't happen anyway, the hash functions have no reason to fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) get_random_bytes(result, ic->tag_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
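^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)  * Work item that processes the integrity metadata for one bio. With an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)  * internal hash it computes the checksum of every block and writes it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)  * (TAG_WRITE) or compares it with (TAG_CMP) the stored tags; without an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)  * internal hash it copies tags between the bio integrity payload and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)  * metadata area (TAG_READ/TAG_WRITE).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)  */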
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) static void integrity_metadata(struct work_struct *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) struct dm_integrity_c *ic = dio->ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (ic->internal_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) struct bvec_iter iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) struct bio_vec bv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) char *checksums;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) sector_t sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) unsigned sectors_to_process;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) if (unlikely(ic->mode == 'R'))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) goto skip_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (likely(dio->op != REQ_OP_DISCARD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (!checksums) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) checksums = checksums_onstack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if (WARN_ON(extra_space &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) digest_size > sizeof(checksums_onstack))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
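^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		 * A discard carries no data to checksum; instead, overwrite the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		 * tags of the discarded blocks with DISCARD_FILLER, at most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		 * max_blocks tags per dm_integrity_rw_tag() call. For example
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		 * (assuming a 4096-byte PAGE_SIZE and a 4-byte tag), one call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		 * covers 4096 / 4 = 1024 blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		 */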
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (unlikely(dio->op == REQ_OP_DISCARD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) unsigned bi_size = dio->bio_details.bi_iter.bi_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) unsigned max_blocks = max_size / ic->tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) memset(checksums, DISCARD_FILLER, max_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) while (bi_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) this_step_blocks = min(this_step_blocks, max_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) this_step_blocks * ic->tag_size, TAG_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (unlikely(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) if (likely(checksums != checksums_onstack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) kfree(checksums);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 				bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (likely(checksums != checksums_onstack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) kfree(checksums);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) goto skip_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
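^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		 * Walk the recorded bio segments and compute one tag per block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		 * If only the on-stack fallback buffer is available, it holds a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		 * single tag, so dm_integrity_rw_tag() is called per block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		 * instead of per segment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		 */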
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) sector = dio->range.logical_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) sectors_to_process = dio->range.n_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) unsigned pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) char *mem, *checksums_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) checksums_ptr = checksums;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) checksums_ptr += ic->tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) sectors_to_process -= ic->sectors_per_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) pos += ic->sectors_per_block << SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) sector += ic->sectors_per_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) kunmap_atomic(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (unlikely(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (r > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", bio_devname(bio, b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) (sector - ((r + ic->tag_size - 1) / ic->tag_size)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) r = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) atomic64_inc(&ic->number_of_mismatches);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) if (likely(checksums != checksums_onstack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) kfree(checksums);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (!sectors_to_process)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) if (unlikely(pos < bv.bv_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) bv.bv_offset += pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) bv.bv_len -= pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) if (likely(checksums != checksums_onstack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) kfree(checksums);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if (bip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct bio_vec biv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) struct bvec_iter iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) unsigned data_to_process = dio->range.n_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) sector_to_block(ic, data_to_process);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) data_to_process *= ic->tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) bip_for_each_vec(biv, bip, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) unsigned char *tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) unsigned this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) BUG_ON(PageHighMem(biv.bv_page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) this_len = min(biv.bv_len, data_to_process);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (unlikely(r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) data_to_process -= this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) if (!data_to_process)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) skip_io:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) dec_in_flight(dio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) dio->bi_status = errno_to_blk_status(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) dec_in_flight(dio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
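^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)  * The target's map callback. Flushes are queued directly; other bios are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)  * validated (bounds, block alignment, integrity payload size), remapped to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)  * the interleaved data layout and passed to dm_integrity_map_continue().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)  */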
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) struct dm_integrity_c *ic = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) struct bio_integrity_payload *bip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) sector_t area, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) dio->ic = ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) dio->bi_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) dio->op = bio_op(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
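^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	 * If a discard crosses a max_io_len boundary, accept only the part up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	 * to the boundary; DM core resubmits the rest as a new bio, so a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	 * single request never spans two interleaved areas.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	 */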
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) if (unlikely(dio->op == REQ_OP_DISCARD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (ti->max_io_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) unsigned log2_max_io_len = __fls(ti->max_io_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) sector_t start_boundary = sec >> log2_max_io_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (start_boundary < end_boundary) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) dm_accept_partial_bio(bio, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) submit_flush_bio(ic, dio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) return DM_MAPIO_SUBMITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) if (unlikely(dio->fua)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		 * Don't pass down the FUA flag because we have to flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		 * the disk cache anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) bio->bi_opf &= ~REQ_FUA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) dio->range.logical_sector, bio_sectors(bio),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) ic->provided_data_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) return DM_MAPIO_KILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) ic->sectors_per_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) dio->range.logical_sector, bio_sectors(bio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) return DM_MAPIO_KILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) struct bvec_iter iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) struct bio_vec bv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) bio_for_each_segment(bv, bio, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) bv.bv_offset, bv.bv_len, ic->sectors_per_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) return DM_MAPIO_KILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) bip = bio_integrity(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) if (!ic->internal_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) if (bip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) if (ic->log2_tag_size >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) wanted_tag_size <<= ic->log2_tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) wanted_tag_size *= ic->tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) DMERR("Invalid integrity data size %u, expected %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) bip->bip_iter.bi_size, wanted_tag_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) return DM_MAPIO_KILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (unlikely(bip != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) DMERR("Unexpected integrity data when using internal hash");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) return DM_MAPIO_KILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) return DM_MAPIO_KILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) dm_integrity_map_continue(dio, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) return DM_MAPIO_SUBMITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
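^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)  * Copy bio data into the in-memory journal (writes) or out of it (reads),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)  * starting at the given journal section and entry. Returns true if the bio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)  * was only partially processed because the allocated journal range ended;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)  * the caller then retakes the lock and continues with the updated range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)  */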
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) unsigned journal_section, unsigned journal_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) struct dm_integrity_c *ic = dio->ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) sector_t logical_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) unsigned n_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) logical_sector = dio->range.logical_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) n_sectors = dio->range.n_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) struct bio_vec bv = bio_iovec(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) char *mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) bv.bv_len = n_sectors << SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) n_sectors -= bv.bv_len >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
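^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		 * kmap_atomic() disables preemption, so the page must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		 * unmapped before waiting for an in-progress journal entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		 * below; after the wait, map it again and retry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		 */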
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) retry_kmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) mem = kmap_atomic(bv.bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (likely(dio->op == REQ_OP_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) flush_dcache_page(bv.bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) if (unlikely(dio->op == REQ_OP_READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) struct journal_sector *js;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) char *mem_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) unsigned s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (unlikely(journal_entry_is_inprogress(je))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) flush_dcache_page(bv.bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) kunmap_atomic(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) goto retry_kmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) BUG_ON(journal_entry_get_sector(je) != logical_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) js = access_journal_data(ic, journal_section, journal_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) mem_ptr = mem + bv.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) s = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) *(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) js++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) mem_ptr += 1 << SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) } while (++s < ic->sectors_per_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) #ifdef INTERNAL_VERIFY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (ic->internal_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) logical_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) if (!ic->internal_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) struct bio_integrity_payload *bip = bio_integrity(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) unsigned tag_todo = ic->tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) char *tag_ptr = journal_entry_tag(ic, je);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 				if (bip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 					do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 						struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 						unsigned tag_now = min(biv.bv_len, tag_todo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 						char *tag_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 						BUG_ON(PageHighMem(biv.bv_page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 						tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 						if (likely(dio->op == REQ_OP_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 							memcpy(tag_ptr, tag_addr, tag_now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 						else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 							memcpy(tag_addr, tag_ptr, tag_now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 						bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 						tag_ptr += tag_now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 						tag_todo -= tag_now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 					} while (unlikely(tag_todo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 				} else if (likely(dio->op == REQ_OP_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 					memset(tag_ptr, 0, tag_todo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) if (likely(dio->op == REQ_OP_WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) struct journal_sector *js;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) unsigned s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) js = access_journal_data(ic, journal_section, journal_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) s = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) je->last_bytes[s] = js[s].commit_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) } while (++s < ic->sectors_per_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) if (ic->internal_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) if (unlikely(digest_size > ic->tag_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) char checksums_onstack[HASH_MAX_DIGESTSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) journal_entry_set_sector(je, logical_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) logical_sector += ic->sectors_per_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) journal_entry++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) if (unlikely(journal_entry == ic->journal_section_entries)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) journal_entry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) journal_section++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) wraparound_section(ic, &journal_section);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) if (unlikely(dio->op == REQ_OP_READ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) flush_dcache_page(bv.bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) kunmap_atomic(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) } while (n_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) if (likely(dio->op == REQ_OP_WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) wake_up(&ic->copy_to_journal_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) queue_work(ic->commit_wq, &ic->commit_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) schedule_autocommit(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) remove_range(ic, &dio->range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) if (unlikely(bio->bi_iter.bi_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) sector_t area, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) dio->range.logical_sector = logical_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
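^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)  * The main I/O path. Under endio_wait.lock the operation is trimmed to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)  * range that can be locked against concurrent I/O; in mode 'J' writes are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)  * copied into the journal and reads found in the journal are served from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)  * it. Everything else is submitted to the underlying device, with the tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)  * work done synchronously (reads verified with an internal hash) or from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)  * the metadata workqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)  */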
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) struct dm_integrity_c *ic = dio->ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) unsigned journal_section, journal_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) unsigned journal_read_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) struct completion read_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) bool discard_retried = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) need_sync_io = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
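^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	 * Operations that will wait (synchronous reads, discards that go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	 * through the journal) must not block the map callback; bounce them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	 * to a workqueue first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	 */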
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) if (need_sync_io && from_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) INIT_WORK(&dio->work, integrity_bio_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) queue_work(ic->offload_wq, &dio->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) lock_retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) spin_lock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) if (unlikely(dm_integrity_failed(ic))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) do_endio(ic, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) dio->range.n_sectors = bio_sectors(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) journal_read_pos = NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if (dio->op == REQ_OP_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) unsigned next_entry, i, pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) unsigned ws, we, range_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) dio->range.n_sectors = min(dio->range.n_sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) if (unlikely(!dio->range.n_sectors)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) if (from_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) goto offload_to_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) sleep_on_endio_wait(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) ic->free_sectors -= range_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) journal_section = ic->free_section;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) journal_entry = ic->free_section_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) next_entry = ic->free_section_entry + range_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) ic->free_section_entry = next_entry % ic->journal_section_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) ic->free_section += next_entry / ic->journal_section_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) wraparound_section(ic, &ic->free_section);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) pos = journal_section * ic->journal_section_entries + journal_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) ws = journal_section;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) we = journal_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) struct journal_entry *je;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) pos++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) if (unlikely(pos >= ic->journal_entries))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) je = access_journal_entry(ic, ws, we);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) BUG_ON(!journal_entry_is_unused(je));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) journal_entry_set_inprogress(je);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) we++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) if (unlikely(we == ic->journal_section_entries)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) we = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) ws++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) wraparound_section(ic, &ws);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) goto journal_read_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) sector_t next_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) if (likely(journal_read_pos == NOT_FOUND)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) dio->range.n_sectors = next_sector - dio->range.logical_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) unsigned jp = journal_read_pos + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) dio->range.n_sectors = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) if (unlikely(!add_new_range(ic, &dio->range, true))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) * We must not sleep in the request routine because it could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) * stall bios on current->bio_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) * So, we offload the bio to a workqueue if we have to sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) if (from_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) offload_to_thread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) INIT_WORK(&dio->work, integrity_bio_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) queue_work(ic->wait_wq, &dio->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (journal_read_pos != NOT_FOUND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) dio->range.n_sectors = ic->sectors_per_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) wait_and_add_new_range(ic, &dio->range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) * wait_and_add_new_range drops the spinlock, so the journal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) * may have been changed arbitrarily. We need to recheck.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) * To simplify the code, we restrict I/O size to just one block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) if (journal_read_pos != NOT_FOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) sector_t next_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) if (unlikely(new_pos != journal_read_pos)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) remove_range_unlocked(ic, &dio->range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) sector_t next_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if (unlikely(new_pos != NOT_FOUND) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) remove_range_unlocked(ic, &dio->range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) queue_work(ic->commit_wq, &ic->commit_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) flush_workqueue(ic->commit_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) queue_work(ic->writer_wq, &ic->writer_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) flush_workqueue(ic->writer_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) discard_retried = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) goto lock_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) if (unlikely(journal_read_pos != NOT_FOUND)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) journal_section = journal_read_pos / ic->journal_section_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) journal_entry = journal_read_pos % ic->journal_section_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) goto journal_read_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) struct bitmap_block_status *bbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) spin_lock(&bbs->bio_queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) bio_list_add(&bbs->bio_queue, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) spin_unlock(&bbs->bio_queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) queue_work(ic->writer_wq, &bbs->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
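^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	 * Two references: one dropped when the submitted bio completes, one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	 * dropped when the tag processing finishes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	 */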
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) dio->in_flight = (atomic_t)ATOMIC_INIT(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) if (need_sync_io) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) init_completion(&read_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) dio->completion = &read_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) dio->completion = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) dm_bio_record(&dio->bio_details, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) bio_set_dev(bio, ic->dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) bio->bi_integrity = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) bio->bi_opf &= ~REQ_INTEGRITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) bio->bi_end_io = integrity_end_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) integrity_metadata(&dio->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) dm_integrity_flush_buffers(ic, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) dio->in_flight = (atomic_t)ATOMIC_INIT(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) dio->completion = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) submit_bio_noacct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) submit_bio_noacct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) if (need_sync_io) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) wait_for_completion_io(&read_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) goto skip_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) if (ic->mode == 'B') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) goto skip_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (likely(!bio->bi_status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) integrity_metadata(&dio->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) skip_check:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) dec_in_flight(dio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) INIT_WORK(&dio->work, integrity_metadata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) queue_work(ic->metadata_wq, &dio->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) journal_read_write:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) goto lock_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) do_endio_flush(ic, dio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) static void integrity_bio_wait(struct work_struct *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) dm_integrity_map_continue(dio, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
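^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)  * If the current free section is partially used, skip its remaining entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)  * so that commit always works on whole sections. The WARN_ON checks the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)  * journal accounting invariant:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)  *   journal_sections * journal_section_entries ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)  *      (n_uncommitted_sections + n_committed_sections) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)  *      journal_section_entries + free_sectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)  */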
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) static void pad_uncommitted(struct dm_integrity_c *ic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) if (ic->free_section_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) ic->free_section_entry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) ic->free_section++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) wraparound_section(ic, &ic->free_section);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) ic->n_uncommitted_sections++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) (ic->n_uncommitted_sections + ic->n_committed_sections) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) ic->journal_section_entries + ic->free_sectors)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) DMCRIT("journal_sections %u, journal_section_entries %u, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) "n_uncommitted_sections %u, n_committed_sections %u, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) "journal_section_entries %u, free_sectors %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) ic->journal_sections, ic->journal_section_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) ic->n_uncommitted_sections, ic->n_committed_sections,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) ic->journal_section_entries, ic->free_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)
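^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)  * Commit work: wait until all in-progress copies into the uncommitted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)  * sections have finished, stamp every journal sector with the expected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)  * commit_id (advancing commit_seq on wraparound), write the sections out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)  * and finally complete any queued flush bios.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)  */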
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) static void integrity_commit(struct work_struct *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) unsigned commit_start, commit_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) unsigned i, j, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) struct bio *flushes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) del_timer(&ic->autocommit_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) spin_lock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) flushes = bio_list_get(&ic->flush_bio_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) if (unlikely(ic->mode != 'J')) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) dm_integrity_flush_buffers(ic, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) goto release_flush_bios;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) pad_uncommitted(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) commit_start = ic->uncommitted_section;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) commit_sections = ic->n_uncommitted_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) if (!commit_sections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) goto release_flush_bios;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) i = commit_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) for (n = 0; n < commit_sections; n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) for (j = 0; j < ic->journal_section_entries; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) struct journal_entry *je;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) je = access_journal_entry(ic, i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) for (j = 0; j < ic->journal_section_sectors; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) struct journal_sector *js;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) js = access_journal(ic, i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) if (unlikely(i >= ic->journal_sections))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) ic->commit_seq = next_commit_seq(ic->commit_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) wraparound_section(ic, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) write_journal(ic, commit_start, commit_sections);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) spin_lock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) ic->uncommitted_section += commit_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) wraparound_section(ic, &ic->uncommitted_section);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) ic->n_uncommitted_sections -= commit_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) ic->n_committed_sections += commit_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) queue_work(ic->writer_wq, &ic->writer_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) release_flush_bios:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) while (flushes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) struct bio *next = flushes->bi_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) flushes->bi_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) do_endio(ic, flushes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) flushes = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
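^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)  * Completion callback for the asynchronous copy of one range from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)  * journal to the data device: release the range and the io, report any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)  * error, and account the finished operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)  */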
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) static void complete_copy_from_journal(unsigned long error, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) struct journal_io *io = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) struct journal_completion *comp = io->comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) struct dm_integrity_c *ic = comp->ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) remove_range(ic, &io->range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) mempool_free(io, &ic->journal_io_mempool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) if (unlikely(error != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) dm_integrity_io_error(ic, "copying from journal", -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) complete_journal_op(comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
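^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)  * The last 8 bytes of each journalled sector hold the commit_id, so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)  * original last bytes of the data were saved in je->last_bytes; put them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)  * back before writing the data to its final location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)  */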
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) struct journal_entry *je)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) unsigned s = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) js->commit_id = je->last_bytes[s];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) js++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) } while (++s < ic->sectors_per_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
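^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)  * Write committed journal sections back to the data device. Consecutive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)  * journal entries that map to consecutive device blocks are merged into a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)  * single copy; from_replay indicates crash recovery, where the entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)  * must also be validated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)  */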
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) unsigned write_sections, bool from_replay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) unsigned i, j, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) struct journal_completion comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) struct blk_plug plug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) blk_start_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) comp.ic = ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) comp.in_flight = (atomic_t)ATOMIC_INIT(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) init_completion(&comp.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) i = write_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) #ifndef INTERNAL_VERIFY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) if (unlikely(from_replay))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) rw_section_mac(ic, i, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) for (j = 0; j < ic->journal_section_entries; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) struct journal_entry *je = access_journal_entry(ic, i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) sector_t sec, area, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) unsigned k, l, next_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) sector_t metadata_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) unsigned metadata_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) struct journal_io *io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) if (journal_entry_is_unused(je))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) sec = journal_entry_get_sector(je);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) if (unlikely(from_replay)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) sec &= ~(sector_t)(ic->sectors_per_block - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) if (unlikely(sec >= ic->provided_data_sectors)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) journal_entry_set_unused(je);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) get_area_and_offset(ic, sec, &area, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) restore_last_bytes(ic, access_journal_data(ic, i, j), je);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) for (k = j + 1; k < ic->journal_section_entries; k++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) struct journal_entry *je2 = access_journal_entry(ic, i, k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) sector_t sec2, area2, offset2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) if (journal_entry_is_unused(je2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) sec2 = journal_entry_get_sector(je2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) if (unlikely(sec2 >= ic->provided_data_sectors))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) get_area_and_offset(ic, sec2, &area2, &offset2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) next_loop = k - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) io->comp = &comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) io->range.logical_sector = sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) spin_lock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) add_new_range_and_wait(ic, &io->range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) if (likely(!from_replay)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) /* don't write if there is newer committed sector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) while (j < k && find_newer_committed_node(ic, &section_node[j])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) struct journal_entry *je2 = access_journal_entry(ic, i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) journal_entry_set_unused(je2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) remove_journal_node(ic, &section_node[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) sec += ic->sectors_per_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) offset += ic->sectors_per_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) journal_entry_set_unused(je2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) remove_journal_node(ic, &section_node[k - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) k--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) if (j == k) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) remove_range_unlocked(ic, &io->range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) mempool_free(io, &ic->journal_io_mempool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) goto skip_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) for (l = j; l < k; l++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) remove_journal_node(ic, &section_node[l]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) for (l = j; l < k; l++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) struct journal_entry *je2 = access_journal_entry(ic, i, l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) #ifndef INTERNAL_VERIFY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) unlikely(from_replay) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) ic->internal_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) (char *)access_journal_data(ic, i, l), test_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) journal_entry_set_unused(je2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) ic->tag_size, TAG_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) if (unlikely(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) dm_integrity_io_error(ic, "reading tags", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) atomic_inc(&comp.in_flight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) (k - j) << ic->sb->log2_sectors_per_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) get_data_sector(ic, area, offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) complete_copy_from_journal, io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) skip_io:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) j = next_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) dm_bufio_write_dirty_buffers_async(ic->bufio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) blk_finish_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) complete_journal_op(&comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) wait_for_completion_io(&comp.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) dm_integrity_flush_buffers(ic, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
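/*
 * Work item: write the committed journal sections to the device, then
 * advance the committed-section window and return the written entries to
 * the pool of free journal sectors, waking writers that were blocked
 * waiting for journal space.
 */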
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) static void integrity_writer(struct work_struct *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) unsigned write_start, write_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) unsigned prev_free_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) /* the following test is not needed, but it tests the replay code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) spin_lock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) write_start = ic->committed_section;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) write_sections = ic->n_committed_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) if (!write_sections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) do_journal_write(ic, write_start, write_sections, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) spin_lock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) ic->committed_section += write_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) wraparound_section(ic, &ic->committed_section);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) ic->n_committed_sections -= write_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) prev_free_sectors = ic->free_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) ic->free_sectors += write_sections * ic->journal_section_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) if (unlikely(!prev_free_sectors))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) wake_up_locked(&ic->endio_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)
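/*
 * Persist recalculation progress: flush dirty metadata buffers and rewrite
 * the superblock (which carries recalc_sector).
 */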
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) static void recalc_write_super(struct dm_integrity_c *ic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) dm_integrity_flush_buffers(ic, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) if (dm_integrity_failed(ic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) if (unlikely(r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) dm_integrity_io_error(ic, "writing superblock", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
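/*
 * Background recalculation: starting at sb->recalc_sector, read data in
 * chunks of up to RECALC_SECTORS under an in-progress range that excludes
 * concurrent I/O, compute the integrity tags and write them to the metadata
 * area. The superblock is persisted every RECALC_WRITE_SUPER chunks. In
 * bitmap mode, blocks whose recalc_bitmap bits are already clear are
 * skipped, and the bits of fully processed blocks are cleared afterwards.
 */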
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) static void integrity_recalc(struct work_struct *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) struct dm_integrity_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) struct dm_io_request io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) struct dm_io_region io_loc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) sector_t area, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) sector_t metadata_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) unsigned metadata_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) sector_t logical_sector, n_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) __u8 *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) unsigned super_counter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) spin_lock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) next_chunk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) if (unlikely(dm_post_suspending(ic->ti)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) goto unlock_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if (ic->mode == 'B') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) goto unlock_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) get_area_and_offset(ic, range.logical_sector, &area, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) if (!ic->meta_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) add_new_range_and_wait(ic, &range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) logical_sector = range.logical_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) n_sectors = range.n_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) if (ic->mode == 'B') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) goto advance_and_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) logical_sector += ic->sectors_per_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) n_sectors -= ic->sectors_per_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) n_sectors -= ic->sectors_per_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) get_area_and_offset(ic, logical_sector, &area, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) recalc_write_super(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) if (ic->mode == 'B') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) super_counter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) if (unlikely(dm_integrity_failed(ic)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) io_req.bi_op = REQ_OP_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) io_req.bi_op_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) io_req.mem.type = DM_IO_VMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) io_req.mem.ptr.addr = ic->recalc_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) io_req.notify.fn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) io_req.client = ic->io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) io_loc.bdev = ic->dev->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) io_loc.sector = get_data_sector(ic, area, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) io_loc.count = n_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) r = dm_io(&io_req, 1, &io_loc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) if (unlikely(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) dm_integrity_io_error(ic, "reading data", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) t = ic->recalc_tags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) t += ic->tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) if (unlikely(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) dm_integrity_io_error(ic, "writing tags", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) if (ic->mode == 'B') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) sector_t start, end;
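/*
 * Clear only whole bitmap granules that are now fully recalculated:
 * everything below range.logical_sector was processed earlier, so start
 * is rounded down; end is also rounded down so that a partially covered
 * tail granule keeps its bit set.
 */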
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) start = (range.logical_sector >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) end = ((range.logical_sector + range.n_sectors) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) advance_and_next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) spin_lock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) remove_range_unlocked(ic, &range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) goto next_chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) remove_range(ic, &range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) unlock_ret:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) recalc_write_super(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647)
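/*
 * Work item for one bitmap block: bios whose may_write_bitmap bits are
 * already set may proceed immediately; for the others, the corresponding
 * journal bitmap bits are set and flushed to disk with FUA before the bios
 * are released.
 */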
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) static void bitmap_block_work(struct work_struct *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) struct dm_integrity_c *ic = bbs->ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) struct bio_list bio_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) struct bio_list waiting;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) bio_list_init(&waiting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) spin_lock(&bbs->bio_queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) bio_queue = bbs->bio_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) bio_list_init(&bbs->bio_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) spin_unlock(&bbs->bio_queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) while ((bio = bio_list_pop(&bio_queue))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) struct dm_integrity_io *dio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) remove_range(ic, &dio->range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) INIT_WORK(&dio->work, integrity_bio_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) queue_work(ic->offload_wq, &dio->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) dio->range.n_sectors, BITMAP_OP_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) bio_list_add(&waiting, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) if (bio_list_empty(&waiting))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) while ((bio = bio_list_pop(&waiting))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) dio->range.n_sectors, BITMAP_OP_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) remove_range(ic, &dio->range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) INIT_WORK(&dio->work, integrity_bio_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) queue_work(ic->offload_wq, &dio->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
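/*
 * Periodic bitmap flush: flush outstanding data and metadata while holding
 * an in-progress range over the whole device, clear the journal and
 * may-write bitmaps up to the recalculation boundary, write the bitmaps
 * back with FUA and complete any bios waiting for synchronous mode.
 */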
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) static void bitmap_flush_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) struct dm_integrity_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) unsigned long limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) dm_integrity_flush_buffers(ic, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) range.logical_sector = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) range.n_sectors = ic->provided_data_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) spin_lock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) add_new_range_and_wait(ic, &range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) dm_integrity_flush_buffers(ic, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) limit = ic->provided_data_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) limit = le64_to_cpu(ic->sb->recalc_sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) /*DEBUG_print("zeroing journal\n");*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) spin_lock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) remove_range_unlocked(ic, &range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) bio_endio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) spin_lock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) spin_unlock_irq(&ic->endio_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 
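/*
 * Format n_sections journal sections starting at start_section: clear all
 * entries, stamp every sector with the commit_id for commit_seq and write
 * the result to disk.
 */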
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) unsigned n_sections, unsigned char commit_seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) unsigned i, j, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) if (!n_sections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) for (n = 0; n < n_sections; n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) i = start_section + n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) wraparound_section(ic, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) for (j = 0; j < ic->journal_section_sectors; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) struct journal_sector *js = access_journal(ic, i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) for (j = 0; j < ic->journal_section_entries; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) struct journal_entry *je = access_journal_entry(ic, i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) journal_entry_set_unused(je);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) write_journal(ic, start_section, n_sections);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767)
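/*
 * Map a journal sector's commit_id back to its commit sequence number
 * (0..N_COMMIT_IDS - 1); a commit_id that matches no sequence means the
 * journal is corrupted.
 */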
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) unsigned char k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) for (k = 0; k < N_COMMIT_IDS; k++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) if (dm_integrity_commit_id(ic, i, j, k) == id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) return k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) dm_integrity_io_error(ic, "journal commit id", -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778)
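/*
 * Called on resume: read (and, if needed, decrypt) the journal, determine
 * from the per-sector commit ids which sections were fully committed,
 * replay them to the device if the journal is not empty, and reinitialize
 * the journal if it is unreadable or inconsistent. Finally reset the
 * in-memory journal state and the journal tree.
 */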
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) static void replay_journal(struct dm_integrity_c *ic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) unsigned i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) bool used_commit_ids[N_COMMIT_IDS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) unsigned max_commit_id_sections[N_COMMIT_IDS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) unsigned write_start, write_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) unsigned continue_section;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) bool journal_empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) unsigned char unused, last_used, want_commit_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) if (ic->mode == 'R')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) if (ic->journal_uptodate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) last_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) write_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) if (!ic->just_formatted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) DEBUG_print("reading journal\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) if (ic->journal_io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) if (ic->journal_io) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) struct journal_completion crypt_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) crypt_comp.ic = ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) init_completion(&crypt_comp.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) wait_for_completion(&crypt_comp.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) if (dm_integrity_failed(ic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) goto clear_journal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) journal_empty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) memset(used_commit_ids, 0, sizeof(used_commit_ids));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) memset(max_commit_id_sections, 0, sizeof(max_commit_id_sections));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) for (i = 0; i < ic->journal_sections; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) for (j = 0; j < ic->journal_section_sectors; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) int k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) struct journal_sector *js = access_journal(ic, i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) k = find_commit_seq(ic, i, j, js->commit_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) if (k < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) goto clear_journal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) used_commit_ids[k] = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) max_commit_id_sections[k] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) if (journal_empty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) for (j = 0; j < ic->journal_section_entries; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) struct journal_entry *je = access_journal_entry(ic, i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) if (!journal_entry_is_unused(je)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) journal_empty = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) if (!used_commit_ids[N_COMMIT_IDS - 1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) unused = N_COMMIT_IDS - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) while (unused && !used_commit_ids[unused - 1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) unused--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) for (unused = 0; unused < N_COMMIT_IDS; unused++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) if (!used_commit_ids[unused])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) if (unused == N_COMMIT_IDS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) dm_integrity_io_error(ic, "journal commit ids", -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) goto clear_journal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) unused, used_commit_ids[0], used_commit_ids[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) used_commit_ids[2], used_commit_ids[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) last_used = prev_commit_seq(unused);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) want_commit_seq = prev_commit_seq(last_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) journal_empty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) write_start = max_commit_id_sections[last_used] + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) if (unlikely(write_start >= ic->journal_sections))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) want_commit_seq = next_commit_seq(want_commit_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) wraparound_section(ic, &write_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) i = write_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) for (j = 0; j < ic->journal_section_sectors; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) struct journal_sector *js = access_journal(ic, i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) * This could be caused by crash during writing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) * We won't replay the inconsistent part of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) * journal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) goto brk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) if (unlikely(i >= ic->journal_sections))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) want_commit_seq = next_commit_seq(want_commit_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) wraparound_section(ic, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) brk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) if (!journal_empty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) write_sections, write_start, want_commit_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) do_journal_write(ic, write_start, write_sections, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) continue_section = write_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) ic->commit_seq = want_commit_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) unsigned s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) unsigned char erase_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) clear_journal:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) DEBUG_print("clearing journal\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) erase_seq = prev_commit_seq(prev_commit_seq(last_used));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) s = write_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) init_journal(ic, s, 1, erase_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) s++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) wraparound_section(ic, &s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) if (ic->journal_sections >= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) init_journal(ic, s, ic->journal_sections - 2, erase_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) s += ic->journal_sections - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) wraparound_section(ic, &s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) init_journal(ic, s, 1, erase_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) continue_section = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) ic->commit_seq = next_commit_seq(erase_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) ic->committed_section = continue_section;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) ic->n_committed_sections = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) ic->uncommitted_section = continue_section;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) ic->n_uncommitted_sections = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) ic->free_section = continue_section;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) ic->free_section_entry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) ic->free_sectors = ic->journal_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) ic->journal_tree_root = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) for (i = 0; i < ic->journal_entries; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) init_journal_node(&ic->journal_tree[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938)
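/*
 * In bitmap mode, shorten the bitmap flush interval to roughly 10 ms and
 * flush immediately, so that an imminent shutdown leaves as few dirty bits
 * as possible.
 */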
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) DEBUG_print("dm_integrity_enter_synchronous_mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) if (ic->mode == 'B') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) ic->synchronous_mode = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) cancel_delayed_work_sync(&ic->bitmap_flush_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) flush_workqueue(ic->commit_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952)
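/* Reboot notifier: switch to synchronous mode before the machine goes down. */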
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) DEBUG_print("dm_integrity_reboot\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) dm_integrity_enter_synchronous_mode(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963)
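/*
 * Quiesce the target: stop the autocommit timer, drain the recalculation,
 * commit and writer workqueues, flush all data and metadata, and in bitmap
 * mode write back a clean journal and clear SB_FLAG_DIRTY_BITMAP so that
 * the next resume does not trigger a bitmap replay.
 */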
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) static void dm_integrity_postsuspend(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) del_timer_sync(&ic->autocommit_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) if (ic->recalc_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) drain_workqueue(ic->recalc_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) if (ic->mode == 'B')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) cancel_delayed_work_sync(&ic->bitmap_flush_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) queue_work(ic->commit_wq, &ic->commit_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) drain_workqueue(ic->commit_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) if (ic->mode == 'J') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) if (ic->meta_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) queue_work(ic->writer_wq, &ic->writer_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) drain_workqueue(ic->writer_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) dm_integrity_flush_buffers(ic, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) if (ic->mode == 'B') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) dm_integrity_flush_buffers(ic, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) /* set to 0 to test bitmap replay code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) init_journal(ic, 0, ic->journal_sections, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) if (unlikely(r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) dm_integrity_io_error(ic, "writing superblock", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) ic->journal_uptodate = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005)
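/*
 * Resume: handle a grown device (update provided_data_sectors and, in
 * bitmap mode, mark the new area for recalculation), recover from a dirty
 * bitmap or replay the journal as appropriate, restart recalculation if it
 * was interrupted, and register the reboot notifier.
 */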
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) static void dm_integrity_resume(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) DEBUG_print("resume\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) if (ic->provided_data_sectors != old_provided_data_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) if (ic->provided_data_sectors > old_provided_data_sectors &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) ic->mode == 'B' &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) if (unlikely(r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) dm_integrity_io_error(ic, "writing superblock", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) DEBUG_print("resume dirty_bitmap\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) if (ic->mode == 'B') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) BITMAP_OP_TEST_ALL_CLEAR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) ic->sb->recalc_sector = cpu_to_le64(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) ic->sb->recalc_sector = cpu_to_le64(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) ic->sb->recalc_sector = cpu_to_le64(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) init_journal(ic, 0, ic->journal_sections, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) replay_journal(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) if (unlikely(r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) dm_integrity_io_error(ic, "writing superblock", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) replay_journal(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) if (ic->mode == 'B') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) if (unlikely(r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) dm_integrity_io_error(ic, "writing superblock", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095)
	DEBUG_print("testing recalc: %x\n", le32_to_cpu(ic->sb->flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) if (recalc_pos < ic->provided_data_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) queue_work(ic->recalc_wq, &ic->recalc_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) } else if (recalc_pos > ic->provided_data_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) recalc_write_super(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) ic->reboot_notifier.notifier_call = dm_integrity_reboot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) ic->reboot_notifier.next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) /* set to 1 to stress test synchronous mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) dm_integrity_enter_synchronous_mode(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) static void dm_integrity_status(struct dm_target *ti, status_type_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) unsigned status_flags, char *result, unsigned maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) unsigned arg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) size_t sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) case STATUSTYPE_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) DMEMIT("%llu %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) (unsigned long long)atomic64_read(&ic->number_of_mismatches),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) ic->provided_data_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) DMEMIT(" -");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) case STATUSTYPE_TABLE: {
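		/*
		 * Recompute the journal_watermark percentage from the free
		 * sectors threshold; adding journal_entries / 2 before the
		 * division rounds to the nearest percent.
		 */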
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) watermark_percentage += ic->journal_entries / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) do_div(watermark_percentage, ic->journal_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) arg_count = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) arg_count += !!ic->meta_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) arg_count += ic->sectors_per_block != 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) arg_count += ic->discard;
		arg_count += ic->mode == 'J';	/* journal_watermark */
		arg_count += ic->mode == 'J';	/* commit_time */
		arg_count += ic->mode == 'B';	/* sectors_per_bit */
		arg_count += ic->mode == 'B';	/* bitmap_flush_interval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) arg_count += !!ic->internal_hash_alg.alg_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) arg_count += !!ic->journal_crypt_alg.alg_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) arg_count += !!ic->journal_mac_alg.alg_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) arg_count += ic->legacy_recalculate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) ic->tag_size, ic->mode, arg_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) if (ic->meta_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) DMEMIT(" meta_device:%s", ic->meta_dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) if (ic->sectors_per_block != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) DMEMIT(" recalculate");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) if (ic->discard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) DMEMIT(" allow_discards");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) if (ic->mode == 'J') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) DMEMIT(" commit_time:%u", ic->autocommit_msec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) if (ic->mode == 'B') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) DMEMIT(" fix_padding");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) if (ic->legacy_recalculate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) DMEMIT(" legacy_recalculate");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) #define EMIT_ALG(a, n) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) if (ic->a.alg_string) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) DMEMIT(" %s:%s", n, ic->a.alg_string); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) if (ic->a.key_string) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) DMEMIT(":%s", ic->a.key_string);\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) EMIT_ALG(internal_hash_alg, "internal_hash");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) EMIT_ALG(journal_crypt_alg, "journal_crypt");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) EMIT_ALG(journal_mac_alg, "journal_mac");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) static int dm_integrity_iterate_devices(struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) iterate_devices_callout_fn fn, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) struct dm_integrity_c *ic = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) if (!ic->meta_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) return fn(ti, ic->dev, 0, ti->len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) struct dm_integrity_c *ic = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) if (ic->sectors_per_block > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)
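/*
 * Derive the journal geometry from the superblock: a section consists of
 * JOURNAL_BLOCK_SECTORS sectors of packed journal entries (each sector
 * loses JOURNAL_MAC_PER_SECTOR bytes when a journal MAC is used) followed
 * by one data block per entry.
 */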
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) static void calculate_journal_section_size(struct dm_integrity_c *ic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) unsigned sector_space = JOURNAL_SECTOR_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) JOURNAL_ENTRY_ROUNDUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) sector_space -= JOURNAL_MAC_PER_SECTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) static int calculate_device_limits(struct dm_integrity_c *ic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) __u64 initial_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) calculate_journal_section_size(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) ic->initial_sectors = initial_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) if (!ic->meta_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) sector_t last_sector, last_area, last_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) /* we have to maintain excessive padding for compatibility with existing volumes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) __u64 metadata_run_padding =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) (__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
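		/*
		 * Illustration, assuming METADATA_PADDING_SECTORS is 8 as
		 * defined earlier in this file: fixed padding rounds the
		 * metadata run to 8 << 9 = 4096 bytes, while the legacy
		 * formula rounds to (1 << 9) << 8 = 131072 bytes.
		 */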
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) metadata_run_padding) >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) if (!(ic->metadata_run & (ic->metadata_run - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) ic->log2_metadata_run = __ffs(ic->metadata_run);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) ic->log2_metadata_run = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) last_sector = get_data_sector(ic, last_area, last_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) meta_size <<= ic->log2_buffer_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) if (ic->initial_sectors + meta_size < ic->initial_sectors ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) ic->initial_sectors + meta_size > ic->meta_device_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) ic->metadata_run = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) ic->log2_metadata_run = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279)
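/*
 * Find the largest provided_data_sectors that still passes
 * calculate_device_limits(): try setting each bit from the highest
 * downwards and keep it only if the resulting size still fits.
 */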
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) static void get_provided_data_sectors(struct dm_integrity_c *ic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) if (!ic->meta_dev) {
		int test_bit;

		ic->provided_data_sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) __u64 prev_data_sectors = ic->provided_data_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) ic->provided_data_sectors |= (sector_t)1 << test_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) if (calculate_device_limits(ic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) ic->provided_data_sectors = prev_data_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) ic->provided_data_sectors = ic->data_device_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) unsigned journal_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) int test_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) memcpy(ic->sb->magic, SB_MAGIC, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) if (ic->journal_mac_alg.alg_string)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) calculate_journal_section_size(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) journal_sections = journal_sectors / ic->journal_section_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) if (!journal_sections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) journal_sections = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) if (!ic->meta_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) if (ic->fix_padding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) ic->sb->journal_sections = cpu_to_le32(journal_sections);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) if (!interleave_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) get_provided_data_sectors(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) if (!ic->provided_data_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) ic->sb->log2_interleave_sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) get_provided_data_sectors(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) if (!ic->provided_data_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334)
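		/*
		 * Build the largest journal_sections value (testing one bit
		 * at a time, high to low) that still passes
		 * calculate_device_limits(); if not even one section fits,
		 * retry with a smaller buffer size.
		 */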
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) try_smaller_buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) ic->sb->journal_sections = cpu_to_le32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) __u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) if (test_journal_sections > journal_sections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) if (calculate_device_limits(ic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) if (!le32_to_cpu(ic->sb->journal_sections)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) if (ic->log2_buffer_sectors > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) ic->log2_buffer_sectors--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) goto try_smaller_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) sb_set_version(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) struct blk_integrity bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) memset(&bi, 0, sizeof(bi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) bi.profile = &dm_integrity_profile;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) bi.tuple_size = ic->tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) bi.tag_size = bi.tuple_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) blk_integrity_register(disk, &bi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) static void dm_integrity_free_page_list(struct page_list *pl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) if (!pl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) for (i = 0; pl[i].page; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) __free_page(pl[i].page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) kvfree(pl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) struct page_list *pl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) if (!pl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) for (i = 0; i < n_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) pl[i].page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) if (!pl[i].page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) dm_integrity_free_page_list(pl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) if (i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) pl[i - 1].next = &pl[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) pl[i].page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) pl[i].next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) return pl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) {
	unsigned i;

	for (i = 0; i < ic->journal_sections; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) kvfree(sl[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) kvfree(sl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420)
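/*
 * Allocate one scatterlist per journal section: page_list_location()
 * yields the first and last page/offset of the section within the journal
 * page list, and every page in that span becomes one sg entry.
 */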
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) struct page_list *pl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) struct scatterlist **sl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) sl = kvmalloc_array(ic->journal_sections,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) sizeof(struct scatterlist *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) if (!sl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) for (i = 0; i < ic->journal_sections; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) struct scatterlist *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) unsigned start_index, start_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) unsigned end_index, end_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) unsigned n_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) unsigned idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) page_list_location(ic, i, 0, &start_index, &start_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) page_list_location(ic, i, ic->journal_section_sectors - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) &end_index, &end_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) n_pages = (end_index - start_index + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) if (!s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) dm_integrity_free_journal_scatterlist(ic, sl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) sg_init_table(s, n_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) for (idx = start_index; idx <= end_index; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) char *va = lowmem_page_address(pl[idx].page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) unsigned start = 0, end = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) if (idx == start_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) start = start_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) if (idx == end_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) end = end_offset + (1 << SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) sg_set_buf(&s[idx - start_index], va + start, end - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) sl[i] = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) return sl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) static void free_alg(struct alg_spec *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) kfree_sensitive(a->alg_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) kfree_sensitive(a->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) memset(a, 0, sizeof *a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476)
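/*
 * Parse an "option:algorithm[:key]" argument - for example a hypothetical
 * "internal_hash:hmac(sha256):0123456789abcdef". The part after the first
 * ':' is the algorithm name; the optional hex string after the second ':'
 * is decoded with hex2bin() into the binary key.
 */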
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) char *k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) free_alg(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) if (!a->alg_string)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) k = strchr(a->alg_string, ':');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) if (k) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) *k = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) a->key_string = k + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) if (strlen(a->key_string) & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) a->key_size = strlen(a->key_string) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) a->key = kmalloc(a->key_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) if (!a->key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) if (hex2bin(a->key, a->key_string, a->key_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) goto inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) inval:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) *error = error_inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) *error = "Out of memory for an argument";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) char *error_alg, char *error_key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) if (a->alg_string) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) if (IS_ERR(*hash)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) *error = error_alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) r = PTR_ERR(*hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) *hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) if (a->key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) r = crypto_shash_setkey(*hash, a->key, a->key_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) *error = error_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) *error = error_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) return -ENOKEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) static int create_journal(struct dm_integrity_c *ic, char **error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) __u64 journal_pages, journal_desc_size, journal_tree_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) unsigned char *crypt_data = NULL, *crypt_iv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) struct skcipher_request *req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) journal_desc_size = journal_pages * sizeof(struct page_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) *error = "Journal doesn't fit into memory";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) ic->journal_pages = journal_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) if (!ic->journal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) *error = "Could not allocate memory for journal";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) if (ic->journal_crypt_alg.alg_string) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) unsigned ivsize, blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) struct journal_completion comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) comp.ic = ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) if (IS_ERR(ic->journal_crypt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) *error = "Invalid journal cipher";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) r = PTR_ERR(ic->journal_crypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) ic->journal_crypt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) if (ic->journal_crypt_alg.key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) ic->journal_crypt_alg.key_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) *error = "Error setting encryption key";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) DEBUG_print("cipher %s, block size %u iv size %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) ic->journal_crypt_alg.alg_string, blocksize, ivsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) if (!ic->journal_io) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) *error = "Could not allocate memory for journal io";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) if (blocksize == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) if (!req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) *error = "Could not allocate crypt request";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) crypt_iv = kzalloc(ivsize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) if (!crypt_iv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) *error = "Could not allocate iv";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) if (!ic->journal_xor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) *error = "Could not allocate memory for journal xor";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) sg = kvmalloc_array(ic->journal_pages + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) sizeof(struct scatterlist),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) if (!sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) *error = "Unable to allocate sg list";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) sg_init_table(sg, ic->journal_pages + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) for (i = 0; i < ic->journal_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) char *va = lowmem_page_address(ic->journal_xor[i].page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) clear_page(va);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) sg_set_buf(&sg[i], va, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) skcipher_request_set_crypt(req, sg, sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) init_completion(&comp.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) comp.in_flight = (atomic_t)ATOMIC_INIT(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) if (do_crypt(true, req, &comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) wait_for_completion(&comp.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) kvfree(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) r = dm_integrity_failed(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) *error = "Unable to encrypt journal";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) crypto_free_skcipher(ic->journal_crypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) ic->journal_crypt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) unsigned crypt_len = roundup(ivsize, blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) if (!req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) *error = "Could not allocate crypt request";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) crypt_iv = kmalloc(ivsize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) if (!crypt_iv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) *error = "Could not allocate iv";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) crypt_data = kmalloc(crypt_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) if (!crypt_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) *error = "Unable to allocate crypt data";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) if (!ic->journal_scatterlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) *error = "Unable to allocate sg list";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) if (!ic->journal_io_scatterlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) *error = "Unable to allocate sg list";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) ic->sk_requests = kvmalloc_array(ic->journal_sections,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) sizeof(struct skcipher_request *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) if (!ic->sk_requests) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) *error = "Unable to allocate sk requests";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) for (i = 0; i < ic->journal_sections; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) struct scatterlist sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) struct skcipher_request *section_req;
				__le32 section_le = cpu_to_le32(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) memset(crypt_iv, 0x00, ivsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) memset(crypt_data, 0x00, crypt_len);
				memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) sg_init_one(&sg, crypt_data, crypt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) init_completion(&comp.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) comp.in_flight = (atomic_t)ATOMIC_INIT(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) if (do_crypt(true, req, &comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) wait_for_completion(&comp.comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) r = dm_integrity_failed(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) *error = "Unable to generate iv";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) if (!section_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) *error = "Unable to allocate crypt request";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) }
				/* double-sized IV: the second half keeps the generated section IV */
				section_req->iv = kmalloc_array(ivsize, 2, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) if (!section_req->iv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) skcipher_request_free(section_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) *error = "Unable to allocate iv";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) memcpy(section_req->iv + ivsize, crypt_data, ivsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) ic->sk_requests[i] = section_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745)
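	/*
	 * The commit seeds must be pairwise distinct (the encryption above
	 * may have transformed them); bump any duplicate and retest until
	 * all four differ.
	 */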
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) for (i = 0; i < N_COMMIT_IDS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) unsigned j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) retest_commit_id:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) for (j = 0; j < i; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) if (ic->commit_ids[j] == ic->commit_ids[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) goto retest_commit_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) }
		DEBUG_print("commit id %u: %016llx\n", i, le64_to_cpu(ic->commit_ids[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) if (journal_tree_size > ULONG_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) *error = "Journal doesn't fit into memory";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) if (!ic->journal_tree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) *error = "Could not allocate memory for journal tree";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) bad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) kfree(crypt_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) kfree(crypt_iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) skcipher_request_free(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) /*
 * Construct an integrity mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) * Arguments:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) * device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) * offset from the start of the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) * tag size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) * D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) * number of optional arguments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) * optional arguments:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) * journal_sectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) * interleave_sectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) * buffer_sectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) * journal_watermark
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) * commit_time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) * meta_device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) * block_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) * sectors_per_bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) * bitmap_flush_interval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) * internal_hash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) * journal_crypt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) * journal_mac
 *	recalculate
 *	allow_discards
 *	fix_padding
 *	legacy_recalculate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) */
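/*
 * An illustrative table line using the arguments above (device, size and
 * journal parameters are hypothetical):
 *
 *   0 1638400 integrity /dev/sdb 0 4 J 2 journal_sectors:1024 internal_hash:crc32c
 */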
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) struct dm_integrity_c *ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) char dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) unsigned extra_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) struct dm_arg_set as;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) static const struct dm_arg _args[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) {0, 16, "Invalid number of feature args"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) bool should_write_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) __u64 threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) unsigned long long start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) __s8 log2_sectors_per_bitmap_bit = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) __s8 log2_blocks_per_bitmap_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) __u64 bits_in_journal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) __u64 n_bitmap_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) #define DIRECT_ARGUMENTS 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) if (argc <= DIRECT_ARGUMENTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) ti->error = "Invalid argument count";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) if (!ic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) ti->error = "Cannot allocate integrity context";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) ti->private = ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) ti->per_io_data_size = sizeof(struct dm_integrity_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) ic->ti = ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) ic->in_progress = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) INIT_LIST_HEAD(&ic->wait_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) init_waitqueue_head(&ic->endio_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) bio_list_init(&ic->flush_bio_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) init_waitqueue_head(&ic->copy_to_journal_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) init_completion(&ic->crypto_backoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) atomic64_set(&ic->number_of_mismatches, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) ti->error = "Device lookup failed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) ti->error = "Invalid starting offset";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) ic->start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) if (strcmp(argv[2], "-")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) ti->error = "Invalid tag size";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) ic->mode = argv[3][0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) ti->error = "Invalid mode (expecting J, B, D, R)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) journal_sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) buffer_sectors = DEFAULT_BUFFER_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) journal_watermark = DEFAULT_JOURNAL_WATERMARK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) sync_msec = DEFAULT_SYNC_MSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) ic->sectors_per_block = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) as.argc = argc - DIRECT_ARGUMENTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) as.argv = argv + DIRECT_ARGUMENTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) while (extra_args--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) const char *opt_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) unsigned val;
		unsigned long long llval;

		opt_string = dm_shift_arg(&as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) if (!opt_string) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) ti->error = "Not enough feature arguments";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) journal_sectors = val ? val : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) interleave_sectors = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) buffer_sectors = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) journal_watermark = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) sync_msec = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) if (ic->meta_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) dm_put_device(ti, ic->meta_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) ic->meta_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) r = dm_get_device(ti, strchr(opt_string, ':') + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) dm_table_get_mode(ti->table), &ic->meta_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) ti->error = "Device lookup failed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
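			/* block_size must be a power of two, at least 512 and at most MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT bytes */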
			if (val < 1 << SECTOR_SHIFT ||
			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
			    (val & (val - 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) ti->error = "Invalid block_size argument";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) ic->sectors_per_block = val >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
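			/* values that are not a power of two are rounded down; sectors_per_bit:0 behaves like 1 */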
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) ti->error = "Invalid bitmap_flush_interval argument";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) ic->bitmap_flush_interval = msecs_to_jiffies(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) "Invalid internal_hash argument");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) "Invalid journal_crypt argument");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) "Invalid journal_mac argument");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) } else if (!strcmp(opt_string, "recalculate")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) ic->recalculate_flag = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) } else if (!strcmp(opt_string, "allow_discards")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) ic->discard = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) } else if (!strcmp(opt_string, "fix_padding")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) ic->fix_padding = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) } else if (!strcmp(opt_string, "legacy_recalculate")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) ic->legacy_recalculate = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) ti->error = "Invalid argument";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) if (!ic->meta_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) ic->meta_device_sectors = ic->data_device_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) if (!journal_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) if (!buffer_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) buffer_sectors = 1;
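	/* round the buffer size down to a power of two, capped so the size in bytes fits in 32 bits */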
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) "Invalid internal hash", "Error setting internal hash key");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) "Invalid journal mac", "Error setting journal mac key");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) if (!ic->tag_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) if (!ic->internal_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) ti->error = "Unknown tag size";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) if (ic->tag_size > MAX_TAG_SIZE) {
		ti->error = "Tag size is too large";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) }
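	/* cache log2(tag_size) when the size is a power of two, so tag offsets can be computed with shifts */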
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) if (!(ic->tag_size & (ic->tag_size - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) ic->log2_tag_size = __ffs(ic->tag_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) ic->log2_tag_size = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) if (ic->mode == 'B' && !ic->internal_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) r = -EINVAL;
		ti->error = "Bitmap mode can only be used with internal hash";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) if (ic->discard && !ic->internal_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) r = -EINVAL;
		ti->error = "Discard can only be used with internal hash";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) ic->autocommit_msec = sync_msec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) ic->io = dm_io_client_create();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) if (IS_ERR(ic->io)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) r = PTR_ERR(ic->io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) ic->io = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) ti->error = "Cannot allocate dm io";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) ti->error = "Cannot allocate mempool";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) if (!ic->metadata_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) ti->error = "Cannot allocate workqueue";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) * If this workqueue were percpu, it would cause bio reordering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) * and reduced performance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) if (!ic->wait_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) ti->error = "Cannot allocate workqueue";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) METADATA_WORKQUEUE_MAX_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) if (!ic->offload_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) ti->error = "Cannot allocate workqueue";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) if (!ic->commit_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) ti->error = "Cannot allocate workqueue";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) INIT_WORK(&ic->commit_work, integrity_commit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) if (ic->mode == 'J' || ic->mode == 'B') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) if (!ic->writer_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) ti->error = "Cannot allocate workqueue";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) INIT_WORK(&ic->writer_work, integrity_writer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) if (!ic->sb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) ti->error = "Cannot allocate superblock area";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) r = sync_rw_sb(ic, REQ_OP_READ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) ti->error = "Error reading superblock";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) should_write_sb = false;
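	/* no valid magic: the superblock area must be all zeroes (a fresh device) before we format it, except in 'R' mode */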
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) if (ic->mode != 'R') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) ti->error = "The device is not initialized";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) r = initialize_superblock(ic, journal_sectors, interleave_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) ti->error = "Could not initialize superblock";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) if (ic->mode != 'R')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) should_write_sb = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) if (!ic->sb->version || ic->sb->version > SB_VERSION_4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) ti->error = "Unknown version";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) r = -EINVAL;
		ti->error = "Tag size doesn't match the information in the superblock";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) r = -EINVAL;
		ti->error = "Block size doesn't match the information in the superblock";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) if (!le32_to_cpu(ic->sb->journal_sections)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) ti->error = "Corrupted superblock, journal_sections is 0";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) /* make sure that ti->max_io_len doesn't overflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) if (!ic->meta_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) ti->error = "Invalid interleave_sectors in the superblock";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) if (ic->sb->log2_interleave_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) ti->error = "Invalid interleave_sectors in the superblock";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) ti->error = "Journal mac mismatch";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) get_provided_data_sectors(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) if (!ic->provided_data_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) ti->error = "The device is too small";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) try_smaller_buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) r = calculate_device_limits(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) if (ic->meta_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) if (ic->log2_buffer_sectors > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) ic->log2_buffer_sectors--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) goto try_smaller_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) ti->error = "The device is too small";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) if (log2_sectors_per_bitmap_bit < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) if (bits_in_journal > UINT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) bits_in_journal = UINT_MAX;
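	/* widen the range covered by each bitmap bit until the whole bitmap fits into the journal space */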
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) log2_sectors_per_bitmap_bit++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) if (should_write_sb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) }
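	/* one bit covers (1 << log2_blocks_per_bitmap_bit) blocks; the bits are packed into 4096-byte bitmap blocks */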
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) if (!ic->meta_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) if (ti->len > ic->provided_data_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) ti->error = "Not enough provided sectors for requested mapping size";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205)
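	/* convert the journal watermark percentage into an absolute threshold, rounding to the nearest unit */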
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) threshold += 50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) do_div(threshold, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) ic->free_sectors_threshold = threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) DEBUG_print("initialized:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) DEBUG_print(" journal_entries %u\n", ic->journal_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) DEBUG_print(" data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) DEBUG_print(" bits_in_journal %llu\n", bits_in_journal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) ic->sb->recalc_sector = cpu_to_le64(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) if (ic->internal_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
		if (!ic->recalc_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) ti->error = "Cannot allocate workqueue";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) INIT_WORK(&ic->recalc_work, integrity_recalc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) if (!ic->recalc_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) ti->error = "Cannot allocate buffer for recalculating";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) ic->tag_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) if (!ic->recalc_tags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) ti->error = "Cannot allocate tags for recalculating";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) ti->error = "Recalculate can only be specified with internal_hash";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) dm_integrity_disable_recalculate(ic)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) r = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) if (IS_ERR(ic->bufio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) r = PTR_ERR(ic->bufio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) ti->error = "Cannot initialize dm-bufio";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) ic->bufio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) if (ic->mode != 'R') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) r = create_journal(ic, &ti->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) if (ic->mode == 'B') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) if (!ic->recalc_bitmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) if (!ic->may_write_bitmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) if (!ic->bbs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) for (i = 0; i < ic->n_bitmap_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) struct bitmap_block_status *bbs = &ic->bbs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) unsigned sector, pl_index, pl_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) INIT_WORK(&bbs->work, bitmap_block_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) bbs->ic = ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) bbs->idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) bio_list_init(&bbs->bio_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) spin_lock_init(&bbs->bio_queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317)
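			/* locate this bitmap block's page and offset within ic->journal, which holds the bitmap in 'B' mode */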
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) if (should_write_sb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) init_journal(ic, 0, ic->journal_sections, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) r = dm_integrity_failed(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) if (unlikely(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) ti->error = "Error initializing journal";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) ti->error = "Error initializing superblock";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) ic->just_formatted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) if (!ic->meta_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) if (ic->mode == 'B') {
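		/* limit bio size to the data range covered by a single bitmap block (BITMAP_BLOCK_SIZE * 8 bits) */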
		unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);

		if (!max_io_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) max_io_len = 1U << 31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) if (!ti->max_io_len || ti->max_io_len > max_io_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) r = dm_set_target_max_io_len(ti, max_io_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) if (!ic->internal_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) dm_integrity_set(ti, ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) ti->num_flush_bios = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) ti->flush_supported = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) if (ic->discard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) ti->num_discard_bios = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) bad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) dm_integrity_dtr(ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) static void dm_integrity_dtr(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) struct dm_integrity_c *ic = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378)
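	/* dm_integrity_dtr() also serves as the constructor's error path, so each resource is checked before being freed */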
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) BUG_ON(!list_empty(&ic->wait_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) if (ic->metadata_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) destroy_workqueue(ic->metadata_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) if (ic->wait_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) destroy_workqueue(ic->wait_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) if (ic->offload_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) destroy_workqueue(ic->offload_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) if (ic->commit_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) destroy_workqueue(ic->commit_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) if (ic->writer_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) destroy_workqueue(ic->writer_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) if (ic->recalc_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) destroy_workqueue(ic->recalc_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) vfree(ic->recalc_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) kvfree(ic->recalc_tags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) kvfree(ic->bbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) if (ic->bufio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) dm_bufio_client_destroy(ic->bufio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) mempool_exit(&ic->journal_io_mempool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) if (ic->io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) dm_io_client_destroy(ic->io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) if (ic->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) dm_put_device(ti, ic->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) if (ic->meta_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) dm_put_device(ti, ic->meta_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) dm_integrity_free_page_list(ic->journal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) dm_integrity_free_page_list(ic->journal_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) dm_integrity_free_page_list(ic->journal_xor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) dm_integrity_free_page_list(ic->recalc_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) dm_integrity_free_page_list(ic->may_write_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) if (ic->journal_scatterlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) if (ic->journal_io_scatterlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) if (ic->sk_requests) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req = ic->sk_requests[i];

			if (req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) kfree_sensitive(req->iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) skcipher_request_free(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) kvfree(ic->sk_requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) kvfree(ic->journal_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) if (ic->sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) if (ic->internal_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) crypto_free_shash(ic->internal_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) free_alg(&ic->internal_hash_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) if (ic->journal_crypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) crypto_free_skcipher(ic->journal_crypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) free_alg(&ic->journal_crypt_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) if (ic->journal_mac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) crypto_free_shash(ic->journal_mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) free_alg(&ic->journal_mac_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) kfree(ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) static struct target_type integrity_target = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) .name = "integrity",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) .version = {1, 6, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) .module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) .ctr = dm_integrity_ctr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) .dtr = dm_integrity_dtr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) .map = dm_integrity_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) .postsuspend = dm_integrity_postsuspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) .resume = dm_integrity_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) .status = dm_integrity_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) .iterate_devices = dm_integrity_iterate_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) .io_hints = dm_integrity_io_hints,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) static int __init dm_integrity_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) journal_io_cache = kmem_cache_create("integrity_journal_io",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) sizeof(struct journal_io), 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) if (!journal_io_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) DMERR("can't allocate journal io cache");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) r = dm_register_target(&integrity_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) if (r < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) DMERR("register failed %d", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) static void __exit dm_integrity_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) dm_unregister_target(&integrity_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) kmem_cache_destroy(journal_io_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) module_init(dm_integrity_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) module_exit(dm_integrity_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) MODULE_AUTHOR("Milan Broz");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) MODULE_AUTHOR("Mikulas Patocka");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) MODULE_LICENSE("GPL");