// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>

#define DM_MSG_PREFIX "writecache"

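/*
 * Defaults for the optional table parameters: the watermarks are
 * percentages of used cache blocks at which background writeback starts
 * (high) and stops (low); MAX_WRITEBACK_JOBS 0 means the number of
 * in-flight writeback jobs is not limited; the autocommit values bound
 * how many blocks and how many milliseconds may pass before uncommitted
 * data is committed.
 */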
#define HIGH_WATERMARK			50
#define LOW_WATERMARK			45
#define MAX_WRITEBACK_JOBS		0
#define ENDIO_LATENCY			16
#define WRITEBACK_LATENCY		64
#define AUTOCOMMIT_BLOCKS_SSD		65536
#define AUTOCOMMIT_BLOCKS_PMEM		64
#define AUTOCOMMIT_MSEC			1000
#define MAX_AGE_DIV			16
#define MAX_AGE_UNSPECIFIED		-1UL

#define BITMAP_GRANULARITY	65536
#if BITMAP_GRANULARITY < PAGE_SIZE
#undef BITMAP_GRANULARITY
#define BITMAP_GRANULARITY	PAGE_SIZE
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
#define DM_WRITECACHE_HAS_PMEM
#endif

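/*
 * On persistent memory, metadata updates must be written through the
 * CPU cache: pmem_assign() copies the value with memcpy_flushcache() so
 * the store is durable once a subsequent pmem_wmb() completes.  Without
 * pmem support it degenerates to a plain assignment.
 */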
#ifdef DM_WRITECACHE_HAS_PMEM
#define pmem_assign(dest, src)					\
do {								\
	typeof(dest) uniq = (src);				\
	memcpy_flushcache(&(dest), &uniq, sizeof(dest));	\
} while (0)
#else
#define pmem_assign(dest, src)	((dest) = (src))
#endif

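/*
 * If the architecture supports machine-check-safe copies, keep a shadow
 * copy of each entry's metadata in DRAM so that reads of
 * original_sector and seq_count never have to touch (possibly failing)
 * persistent memory.
 */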
#if IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC) && defined(DM_WRITECACHE_HAS_PMEM)
#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
#endif

#define MEMORY_SUPERBLOCK_MAGIC		0x23489321
#define MEMORY_SUPERBLOCK_VERSION	1

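/*
 * On-media metadata layout (all fields little-endian): a superblock
 * padded to 64 bytes, followed by one wc_memory_entry per cache block.
 */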
struct wc_memory_entry {
	__le64 original_sector;
	__le64 seq_count;
};

struct wc_memory_superblock {
	union {
		struct {
			__le32 magic;
			__le32 version;
			__le32 block_size;
			__le32 pad;
			__le64 n_blocks;
			__le64 seq_count;
		};
		__le64 padding[8];
	};
	struct wc_memory_entry entries[0];
};

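/*
 * In-core descriptor of one cache block.  On 64-bit, write_in_progress
 * and index are packed into bitfields.  The shadow
 * original_sector/seq_count copies exist only when hardware-error
 * handling is enabled (see above).
 */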
struct wc_entry {
	struct rb_node rb_node;
	struct list_head lru;
	unsigned short wc_list_contiguous;
	bool write_in_progress
#if BITS_PER_LONG == 64
		:1
#endif
	;
	unsigned long index
#if BITS_PER_LONG == 64
		:47
#endif
	;
	unsigned long age;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	uint64_t original_sector;
	uint64_t seq_count;
#endif
};

#ifdef DM_WRITECACHE_HAS_PMEM
#define WC_MODE_PMEM(wc)	((wc)->pmem_mode)
#define WC_MODE_FUA(wc)		((wc)->writeback_fua)
#else
#define WC_MODE_PMEM(wc)	false
#define WC_MODE_FUA(wc)		false
#endif
#define WC_MODE_SORT_FREELIST(wc)	(!WC_MODE_PMEM(wc))

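/*
 * Per-target state.  The lists and trees are protected by 'lock'.  In
 * SSD mode the free entries live in an rb-tree sorted by position (so
 * sequential writes can be given contiguous cache blocks); in pmem mode
 * a simple freelist suffices.
 */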
struct dm_writecache {
	struct mutex lock;
	struct list_head lru;
	union {
		struct list_head freelist;
		struct {
			struct rb_root freetree;
			struct wc_entry *current_free;
		};
	};
	struct rb_root tree;

	size_t freelist_size;
	size_t writeback_size;
	size_t freelist_high_watermark;
	size_t freelist_low_watermark;
	unsigned long max_age;

	unsigned uncommitted_blocks;
	unsigned autocommit_blocks;
	unsigned max_writeback_jobs;

	int error;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	struct wait_queue_head freelist_wait;

	struct timer_list max_age_timer;

	atomic_t bio_in_progress[2];
	struct wait_queue_head bio_in_progress_wait[2];

	struct dm_target *ti;
	struct dm_dev *dev;
	struct dm_dev *ssd_dev;
	sector_t start_sector;
	void *memory_map;
	uint64_t memory_map_size;
	size_t metadata_sectors;
	size_t n_blocks;
	uint64_t seq_count;
	sector_t data_device_sectors;
	void *block_start;
	struct wc_entry *entries;
	unsigned block_size;
	unsigned char block_size_bits;

	bool pmem_mode:1;
	bool writeback_fua:1;

	bool overwrote_committed:1;
	bool memory_vmapped:1;

	bool start_sector_set:1;
	bool high_wm_percent_set:1;
	bool low_wm_percent_set:1;
	bool max_writeback_jobs_set:1;
	bool autocommit_blocks_set:1;
	bool autocommit_time_set:1;
	bool max_age_set:1;
	bool writeback_fua_set:1;
	bool flush_on_suspend:1;
	bool cleaner:1;
	bool cleaner_set:1;

	unsigned high_wm_percent_value;
	unsigned low_wm_percent_value;
	unsigned autocommit_time_value;
	unsigned max_age_value;

	unsigned writeback_all;
	struct workqueue_struct *writeback_wq;
	struct work_struct writeback_work;
	struct work_struct flush_work;

	struct dm_io_client *dm_io;

	raw_spinlock_t endio_list_lock;
	struct list_head endio_list;
	struct task_struct *endio_thread;

	struct task_struct *flush_thread;
	struct bio_list flush_list;

	struct dm_kcopyd_client *dm_kcopyd;
	unsigned long *dirty_bitmap;
	unsigned dirty_bitmap_size;

	struct bio_set bio_set;
	mempool_t copy_pool;
};

#define WB_LIST_INLINE		16

struct writeback_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry **wc_list;
	unsigned wc_list_n;
	struct wc_entry *wc_list_inline[WB_LIST_INLINE];
	struct bio bio;
};

struct copy_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry *e;
	unsigned n_entries;
	int error;
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
					"A percentage of time allocated for data copying");

static void wc_lock(struct dm_writecache *wc)
{
	mutex_lock(&wc->lock);
}

static void wc_unlock(struct dm_writecache *wc)
{
	mutex_unlock(&wc->lock);
}

#ifdef DM_WRITECACHE_HAS_PMEM
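/*
 * Map the cache device's persistent memory into the kernel address
 * space via DAX.  If dax_direct_access() cannot return the whole range
 * as one linear mapping, collect the backing pages chunk by chunk and
 * vmap() them instead; memory_vmapped then tells the cache-flush
 * helpers below to use the vmap flush primitives.
 */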
static int persistent_memory_claim(struct dm_writecache *wc)
{
	int r;
	loff_t s;
	long p, da;
	pfn_t pfn;
	int id;
	struct page **pages;
	sector_t offset;

	wc->memory_vmapped = false;

	s = wc->memory_map_size;
	p = s >> PAGE_SHIFT;
	if (!p) {
		r = -EINVAL;
		goto err1;
	}
	if (p != s >> PAGE_SHIFT) {
		r = -EOVERFLOW;
		goto err1;
	}

	offset = get_start_sect(wc->ssd_dev->bdev);
	if (offset & (PAGE_SIZE / 512 - 1)) {
		r = -EINVAL;
		goto err1;
	}
	offset >>= PAGE_SHIFT - 9;

	id = dax_read_lock();

	da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
	if (da < 0) {
		wc->memory_map = NULL;
		r = da;
		goto err2;
	}
	if (!pfn_t_has_page(pfn)) {
		wc->memory_map = NULL;
		r = -EOPNOTSUPP;
		goto err2;
	}
	if (da != p) {
		long i;
		wc->memory_map = NULL;
		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
		if (!pages) {
			r = -ENOMEM;
			goto err2;
		}
		i = 0;
		do {
			long daa;
			daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
						NULL, &pfn);
			if (daa <= 0) {
				r = daa ? daa : -EINVAL;
				goto err3;
			}
			if (!pfn_t_has_page(pfn)) {
				r = -EOPNOTSUPP;
				goto err3;
			}
			while (daa-- && i < p) {
				pages[i++] = pfn_t_to_page(pfn);
				pfn.val++;
				if (!(i & 15))
					cond_resched();
			}
		} while (i < p);
		wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
		if (!wc->memory_map) {
			r = -ENOMEM;
			goto err3;
		}
		kvfree(pages);
		wc->memory_vmapped = true;
	}

	dax_read_unlock(id);

	wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
	wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;

	return 0;
err3:
	kvfree(pages);
err2:
	dax_read_unlock(id);
err1:
	return r;
}
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
	return -EOPNOTSUPP;
}
#endif

static void persistent_memory_release(struct dm_writecache *wc)
{
	if (wc->memory_vmapped)
		vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}

static struct page *persistent_memory_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static unsigned persistent_memory_page_offset(void *addr)
{
	return (unsigned long)addr & (PAGE_SIZE - 1);
}

static void persistent_memory_flush_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		flush_kernel_vmap_range(ptr, size);
}

static void persistent_memory_invalidate_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		invalidate_kernel_vmap_range(ptr, size);
}

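/*
 * Helpers translating an in-core entry to its on-media metadata slot,
 * its data address in the mapped memory, and the cache-device sector
 * where its data block lives.
 */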
static struct wc_memory_superblock *sb(struct dm_writecache *wc)
{
	return wc->memory_map;
}

static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	return &sb(wc)->entries[e->index];
}

static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
{
	return (char *)wc->block_start + (e->index << wc->block_size_bits);
}

static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
	return wc->start_sector + wc->metadata_sectors +
	       ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}

static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->original_sector;
#else
	return le64_to_cpu(memory_entry(wc, e)->original_sector);
#endif
}

static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->seq_count;
#else
	return le64_to_cpu(memory_entry(wc, e)->seq_count);
#endif
}

static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->seq_count = -1;
#endif
	pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
}

static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
					    uint64_t original_sector, uint64_t seq_count)
{
	struct wc_memory_entry me;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->original_sector = original_sector;
	e->seq_count = seq_count;
#endif
	me.original_sector = cpu_to_le64(original_sector);
	me.seq_count = cpu_to_le64(seq_count);
	pmem_assign(*memory_entry(wc, e), me);
}

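/*
 * Record the first error only (the cmpxchg keeps later errors from
 * overwriting it) and wake anyone sleeping on the freelist so the error
 * is noticed promptly.
 */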
#define writecache_error(wc, err, msg, arg...)			\
do {								\
	if (!cmpxchg(&(wc)->error, 0, err))			\
		DMERR(msg, ##arg);				\
	wake_up(&(wc)->freelist_wait);				\
} while (0)

#define writecache_has_error(wc)	(unlikely(READ_ONCE((wc)->error)))

static void writecache_flush_all_metadata(struct dm_writecache *wc)
{
	if (!WC_MODE_PMEM(wc))
		memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
}

static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
{
	if (!WC_MODE_PMEM(wc))
		__set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
			  wc->dirty_bitmap);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);

struct io_notify {
	struct dm_writecache *wc;
	struct completion c;
	atomic_t count;
};

static void writecache_notify_io(unsigned long error, void *context)
{
	struct io_notify *endio = context;

	if (unlikely(error != 0))
		writecache_error(endio->wc, -EIO, "error writing metadata");
	BUG_ON(atomic_read(&endio->count) <= 0);
	if (atomic_dec_and_test(&endio->count))
		complete(&endio->c);
}

static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
	wait_event(wc->bio_in_progress_wait[direction],
		   !atomic_read(&wc->bio_in_progress[direction]));
}

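/*
 * SSD-mode commit: walk the dirty bitmap and asynchronously write each
 * dirty metadata region from the DRAM copy to the cache device, wait
 * for all of the writes, optionally wait for in-flight data writes,
 * then issue a disk flush so the metadata is durable.
 */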
static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	struct dm_io_region region;
	struct dm_io_request req;
	struct io_notify endio = {
		wc,
		COMPLETION_INITIALIZER_ONSTACK(endio.c),
		ATOMIC_INIT(1),
	};
	unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
	unsigned i = 0;

	while (1) {
		unsigned j;
		i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
		if (unlikely(i == bitmap_bits))
			break;
		j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);

		region.bdev = wc->ssd_dev->bdev;
		region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
		region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);

		if (unlikely(region.sector >= wc->metadata_sectors))
			break;
		if (unlikely(region.sector + region.count > wc->metadata_sectors))
			region.count = wc->metadata_sectors - region.sector;

		region.sector += wc->start_sector;
		atomic_inc(&endio.count);
		req.bi_op = REQ_OP_WRITE;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
		req.client = wc->dm_io;
		req.notify.fn = writecache_notify_io;
		req.notify.context = &endio;

		/* writing via async dm-io (implied by notify.fn above) won't return an error */
		(void) dm_io(&req, 1, &region, NULL);
		i = j;
	}

	writecache_notify_io(0, &endio);
	wait_for_completion_io(&endio.c);

	if (wait_for_ios)
		writecache_wait_for_ios(wc, WRITE);

	writecache_disk_flush(wc, wc->ssd_dev);

	memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}

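/*
 * Write the superblock (the first block of metadata) with REQ_FUA so
 * the new seq_count is on stable media before the commit is considered
 * complete.
 */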
static void ssd_commit_superblock(struct dm_writecache *wc)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = wc->ssd_dev->bdev;
	region.sector = 0;
	region.count = max(4096U, wc->block_size) >> SECTOR_SHIFT;

	if (unlikely(region.sector + region.count > wc->metadata_sectors))
		region.count = wc->metadata_sectors - region.sector;

	region.sector += wc->start_sector;

	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_SYNC | REQ_FUA;
	req.mem.type = DM_IO_VMA;
	req.mem.ptr.vma = (char *)wc->memory_map;
	req.client = wc->dm_io;
	req.notify.fn = NULL;
	req.notify.context = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error writing superblock");
}

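/*
 * Make previously flushed metadata durable: a persistent-memory write
 * barrier suffices in pmem mode, otherwise the dirty regions must be
 * written back to the SSD.
 */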
static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	if (WC_MODE_PMEM(wc))
		pmem_wmb();
	else
		ssd_commit_flushed(wc, wait_for_ios);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = dev->bdev;
	region.sector = 0;
	region.count = 0;
	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_PREFLUSH;
	req.mem.type = DM_IO_KMEM;
	req.mem.ptr.addr = NULL;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error flushing metadata: %d", r);
}

#define WFE_RETURN_FOLLOWING	1
#define WFE_LOWEST_SEQ		2

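/*
 * Look up the cache entry for 'block' in the rb-tree.  Without an exact
 * match, WFE_RETURN_FOLLOWING returns the entry with the next higher
 * original sector instead of NULL.  When several entries exist for the
 * same block, WFE_LOWEST_SEQ selects the oldest (lowest seq_count) one,
 * otherwise the newest is returned.
 */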
static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
					      uint64_t block, int flags)
{
	struct wc_entry *e;
	struct rb_node *node = wc->tree.rb_node;

	if (unlikely(!node))
		return NULL;

	while (1) {
		e = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e) == block)
			break;

		node = (read_original_sector(wc, e) >= block ?
			e->rb_node.rb_left : e->rb_node.rb_right);
		if (unlikely(!node)) {
			if (!(flags & WFE_RETURN_FOLLOWING))
				return NULL;
			if (read_original_sector(wc, e) >= block) {
				return e;
			} else {
				node = rb_next(&e->rb_node);
				if (unlikely(!node))
					return NULL;
				e = container_of(node, struct wc_entry, rb_node);
				return e;
			}
		}
	}

	while (1) {
		struct wc_entry *e2;
		if (flags & WFE_LOWEST_SEQ)
			node = rb_prev(&e->rb_node);
		else
			node = rb_next(&e->rb_node);
		if (unlikely(!node))
			return e;
		e2 = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e2) != block)
			return e;
		e = e2;
	}
}

static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
{
	struct wc_entry *e;
	struct rb_node **node = &wc->tree.rb_node, *parent = NULL;

	while (*node) {
		e = container_of(*node, struct wc_entry, rb_node);
		parent = &e->rb_node;
		if (read_original_sector(wc, e) > read_original_sector(wc, ins))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}
	rb_link_node(&ins->rb_node, parent, node);
	rb_insert_color(&ins->rb_node, &wc->tree);
	list_add(&ins->lru, &wc->lru);
	ins->age = jiffies;
}

static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
{
	list_del(&e->lru);
	rb_erase(&e->rb_node, &wc->tree);
}

static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
{
	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
		if (unlikely(!*node))
			wc->current_free = e;
		while (*node) {
			parent = *node;
			if (&e->rb_node < *node)
				node = &parent->rb_left;
			else
				node = &parent->rb_right;
		}
		rb_link_node(&e->rb_node, parent, node);
		rb_insert_color(&e->rb_node, &wc->freetree);
	} else {
		list_add_tail(&e->lru, &wc->freelist);
	}
	wc->freelist_size++;
}

static inline void writecache_verify_watermark(struct dm_writecache *wc)
{
	if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
		queue_work(wc->writeback_wq, &wc->writeback_work);
}

static void writecache_max_age_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, max_age_timer);

	if (!dm_suspended(wc->ti) && !writecache_has_error(wc)) {
		queue_work(wc->writeback_wq, &wc->writeback_work);
		mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
	}
}

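/*
 * Take an entry off the freelist.  In SSD mode the free entries are
 * kept sorted and the caller may pass the cache sector it wants next,
 * so that the blocks backing one bio come out physically contiguous; if
 * the next free entry does not match, NULL is returned.
 */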
static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
{
	struct wc_entry *e;

	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node *next;
		if (unlikely(!wc->current_free))
			return NULL;
		e = wc->current_free;
		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
			return NULL;
		next = rb_next(&e->rb_node);
		rb_erase(&e->rb_node, &wc->freetree);
		if (unlikely(!next))
			next = rb_first(&wc->freetree);
		wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
	} else {
		if (unlikely(list_empty(&wc->freelist)))
			return NULL;
		e = container_of(wc->freelist.next, struct wc_entry, lru);
		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
			return NULL;
		list_del(&e->lru);
	}
	wc->freelist_size--;

	writecache_verify_watermark(wc);

	return e;
}

static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_unlink(wc, e);
	writecache_add_to_freelist(wc, e);
	clear_seq_count(wc, e);
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (unlikely(waitqueue_active(&wc->freelist_wait)))
		wake_up(&wc->freelist_wait);
}

static void writecache_wait_on_freelist(struct dm_writecache *wc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
	wc_unlock(wc);
	io_schedule();
	finish_wait(&wc->freelist_wait, &wait);
	wc_lock(wc);
}

static void writecache_poison_lists(struct dm_writecache *wc)
{
	/*
	 * Catch incorrect access to these values while the device is suspended.
	 */
	memset(&wc->tree, -1, sizeof wc->tree);
	wc->lru.next = LIST_POISON1;
	wc->lru.prev = LIST_POISON2;
	wc->freelist.next = LIST_POISON1;
	wc->freelist.prev = LIST_POISON2;
}

static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (WC_MODE_PMEM(wc))
		writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
}

static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
{
	return read_seq_count(wc, e) < wc->seq_count;
}

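/*
 * Commit all uncommitted entries: flush their metadata (and, on pmem,
 * their data), advance seq_count and persist it in the superblock, then
 * free any older entries that the newly committed ones supersede (same
 * original sector, lower seq_count).
 */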
static void writecache_flush(struct dm_writecache *wc)
{
	struct wc_entry *e, *e2;
	bool need_flush_after_free;

	wc->uncommitted_blocks = 0;
	del_timer(&wc->autocommit_timer);

	if (list_empty(&wc->lru))
		return;

	e = container_of(wc->lru.next, struct wc_entry, lru);
	if (writecache_entry_is_committed(wc, e)) {
		if (wc->overwrote_committed) {
			writecache_wait_for_ios(wc, WRITE);
			writecache_disk_flush(wc, wc->ssd_dev);
			wc->overwrote_committed = false;
		}
		return;
	}
	while (1) {
		writecache_flush_entry(wc, e);
		if (unlikely(e->lru.next == &wc->lru))
			break;
		e2 = container_of(e->lru.next, struct wc_entry, lru);
		if (writecache_entry_is_committed(wc, e2))
			break;
		e = e2;
		cond_resched();
	}
	writecache_commit_flushed(wc, true);

	wc->seq_count++;
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
	if (WC_MODE_PMEM(wc))
		writecache_commit_flushed(wc, false);
	else
		ssd_commit_superblock(wc);

	wc->overwrote_committed = false;

	need_flush_after_free = false;
	while (1) {
		/* Free another committed entry with lower seq-count */
		struct rb_node *rb_node = rb_prev(&e->rb_node);

		if (rb_node) {
			e2 = container_of(rb_node, struct wc_entry, rb_node);
			if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
			    likely(!e2->write_in_progress)) {
				writecache_free_entry(wc, e2);
				need_flush_after_free = true;
			}
		}
		if (unlikely(e->lru.prev == &wc->lru))
			break;
		e = container_of(e->lru.prev, struct wc_entry, lru);
		cond_resched();
	}

	if (need_flush_after_free)
		writecache_commit_flushed(wc, false);
}

static void writecache_flush_work(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);

	wc_lock(wc);
	writecache_flush(wc);
	wc_unlock(wc);
}

static void writecache_autocommit_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
	if (!writecache_has_error(wc))
		queue_work(wc->writeback_wq, &wc->flush_work);
}

static void writecache_schedule_autocommit(struct dm_writecache *wc)
{
	if (!timer_pending(&wc->autocommit_timer))
		mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct wc_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) bool discarded_something = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (unlikely(!e))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
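/*
 * Free every cache entry whose original sector lies in [start, end).
 * In SSD mode, wait for in-flight bios first, because they may still be
 * using the cache blocks that are about to be freed.
 */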
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) while (read_original_sector(wc, e) < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct rb_node *node = rb_next(&e->rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (likely(!e->write_in_progress)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (!discarded_something) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (!WC_MODE_PMEM(wc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) writecache_wait_for_ios(wc, READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) writecache_wait_for_ios(wc, WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) discarded_something = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (!writecache_entry_is_committed(wc, e))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) wc->uncommitted_blocks--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) writecache_free_entry(wc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (unlikely(!node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) e = container_of(node, struct wc_entry, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (discarded_something)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) writecache_commit_flushed(wc, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) static bool writecache_wait_for_writeback(struct dm_writecache *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (wc->writeback_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) writecache_wait_on_freelist(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) static void writecache_suspend(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct dm_writecache *wc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) bool flush_on_suspend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) del_timer_sync(&wc->autocommit_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) del_timer_sync(&wc->max_age_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
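/*
 * Commit everything that is still only in the cache and, if a flush on
 * suspend was requested, kick off a full writeback; then wait for the
 * writeback workqueue and any remaining writeback I/O to finish before
 * the lists are poisoned.
 */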
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) wc_lock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) writecache_flush(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) flush_on_suspend = wc->flush_on_suspend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (flush_on_suspend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) wc->flush_on_suspend = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) wc->writeback_all++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) queue_work(wc->writeback_wq, &wc->writeback_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) drain_workqueue(wc->writeback_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) wc_lock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (flush_on_suspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) wc->writeback_all--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) while (writecache_wait_for_writeback(wc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (WC_MODE_PMEM(wc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) writecache_poison_lists(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) static int writecache_alloc_entries(struct dm_writecache *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) size_t b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (wc->entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (!wc->entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) for (b = 0; b < wc->n_blocks; b++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct wc_entry *e = &wc->entries[b];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) e->index = b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) e->write_in_progress = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct dm_io_region region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct dm_io_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
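/* Synchronously read the metadata area from the SSD into wc->memory_map. */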
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) region.bdev = wc->ssd_dev->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) region.sector = wc->start_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) region.count = n_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) req.bi_op = REQ_OP_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) req.bi_op_flags = REQ_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) req.mem.type = DM_IO_VMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) req.mem.ptr.vma = (char *)wc->memory_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) req.client = wc->dm_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) req.notify.fn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return dm_io(&req, 1, &region, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) static void writecache_resume(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct dm_writecache *wc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) size_t b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) bool need_flush = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) __le64 sb_seq_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) wc_lock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) wc->data_device_sectors = i_size_read(wc->dev->bdev->bd_inode) >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (WC_MODE_PMEM(wc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) r = writecache_read_metadata(wc, wc->metadata_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) size_t sb_entries_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) writecache_error(wc, r, "unable to read metadata: %d", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) memset((char *)wc->memory_map + sb_entries_offset, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) wc->tree = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) INIT_LIST_HEAD(&wc->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (WC_MODE_SORT_FREELIST(wc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) wc->freetree = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) wc->current_free = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) INIT_LIST_HEAD(&wc->freelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) wc->freelist_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) r = copy_mc_to_kernel(&sb_seq_count, &sb(wc)->seq_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) sizeof(uint64_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) sb_seq_count = cpu_to_le64(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) wc->seq_count = le64_to_cpu(sb_seq_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
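/*
 * With hardware memory error handling enabled, read each metadata entry
 * through copy_mc_to_kernel() into the in-core wc_entry, so a poisoned
 * persistent-memory page turns into an error on that entry rather than
 * a machine-check crash.
 */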
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) #ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) for (b = 0; b < wc->n_blocks; b++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) struct wc_entry *e = &wc->entries[b];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) struct wc_memory_entry wme;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (writecache_has_error(wc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) e->original_sector = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) e->seq_count = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) r = copy_mc_to_kernel(&wme, memory_entry(wc, e),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) sizeof(struct wc_memory_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) (unsigned long)b, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) e->original_sector = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) e->seq_count = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) e->original_sector = le64_to_cpu(wme.original_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) e->seq_count = le64_to_cpu(wme.seq_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) #endif
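/*
 * Rebuild the in-core rb-tree and freelist from the metadata: uncommitted
 * entries are cleared and returned to the freelist; where two committed
 * entries map the same original sector, only the one with the higher
 * sequence count is kept.
 */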
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) for (b = 0; b < wc->n_blocks; b++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) struct wc_entry *e = &wc->entries[b];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (!writecache_entry_is_committed(wc, e)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (read_seq_count(wc, e) != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) erase_this:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) clear_seq_count(wc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) need_flush = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) writecache_add_to_freelist(wc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) struct wc_entry *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (!old) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) writecache_insert_entry(wc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) writecache_error(wc, -EINVAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) "two identical entries, position %llu, sector %llu, sequence %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) (unsigned long long)read_seq_count(wc, e));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) goto erase_this;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) writecache_free_entry(wc, old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) writecache_insert_entry(wc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) need_flush = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (need_flush) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) writecache_flush_all_metadata(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) writecache_commit_flushed(wc, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) writecache_verify_watermark(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (wc->max_age != MAX_AGE_UNSPECIFIED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
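/*
 * "flush" message: commit everything in the cache, then force a full
 * writeback and return only after the writeback workqueue has drained.
 */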
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (argc != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) wc_lock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (dm_suspended(wc->ti)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (writecache_has_error(wc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) writecache_flush(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) wc->writeback_all++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) queue_work(wc->writeback_wq, &wc->writeback_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) flush_workqueue(wc->writeback_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) wc_lock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) wc->writeback_all--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (writecache_has_error(wc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (argc != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) wc_lock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) wc->flush_on_suspend = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
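/*
 * Cleaner mode: raise both freelist watermarks to the full cache size so
 * that writeback keeps running until the cache is empty, and request a
 * flush on the next suspend.
 */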
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static void activate_cleaner(struct dm_writecache *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) wc->flush_on_suspend = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) wc->cleaner = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) wc->freelist_high_watermark = wc->n_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) wc->freelist_low_watermark = wc->n_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (argc != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) wc_lock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) activate_cleaner(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (!dm_suspended(wc->ti))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) writecache_verify_watermark(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) char *result, unsigned maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) int r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct dm_writecache *wc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (!strcasecmp(argv[0], "flush"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) r = process_flush_mesg(argc, argv, wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) else if (!strcasecmp(argv[0], "flush_on_suspend"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) r = process_flush_on_suspend_mesg(argc, argv, wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) else if (!strcasecmp(argv[0], "cleaner"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) r = process_cleaner_mesg(argc, argv, wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) DMERR("unrecognised message received: %s", argv[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * clflushopt performs better with block size 1024, 2048, 4096
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) * non-temporal stores perform better with block size 512
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * block size 512 1024 2048 4096
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) * movnti 496 MB/s 642 MB/s 725 MB/s 744 MB/s
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) * clflushopt 373 MB/s 688 MB/s 1.1 GB/s 1.2 GB/s
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * We see that movnti performs better for 512-byte blocks, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * clflushopt performs better for 1024-byte and larger blocks. So, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * prefer clflushopt for sizes >= 768.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * NOTE: this happens to be the case now (with dm-writecache's single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * threaded model) but re-evaluate this once memcpy_flushcache() is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * enabled to use movdir64b which might invalidate this performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * advantage seen with cache-allocating-writes plus flushing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) #ifdef CONFIG_X86
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (static_cpu_has(X86_FEATURE_CLFLUSHOPT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) likely(boot_cpu_data.x86_clflush_size == 64) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) likely(size >= 768)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) memcpy((void *)dest, (void *)source, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) clflushopt((void *)dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) dest += 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) source += 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) size -= 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) } while (size >= 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) memcpy_flushcache(dest, source, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) unsigned size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) int rw = bio_data_dir(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) unsigned remaining_size = wc->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
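/*
 * Copy one cache block between the bio's pages and persistent memory,
 * one bio_vec at a time.  Reads go through copy_mc_to_kernel() so that a
 * hardware memory error is reported instead of crashing; writes use the
 * flushing memcpy above.
 */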
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) buf = bvec_kmap_irq(&bv, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) size = bv.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (unlikely(size > remaining_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) size = remaining_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (rw == READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) r = copy_mc_to_kernel(buf, data, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) flush_dcache_page(bio_page(bio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (unlikely(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) writecache_error(wc, r, "hardware memory error when reading data: %d", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) bio->bi_status = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) flush_dcache_page(bio_page(bio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) memcpy_flushcache_optimized(data, buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) bvec_kunmap_irq(buf, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) data = (char *)data + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) remaining_size -= size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) bio_advance(bio, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) } while (unlikely(remaining_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
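/*
 * Thread handling bios that writecache_map() offloads in SSD mode:
 * discards are processed here and then passed on to the origin device;
 * flush bios are completed once the cache has been committed.
 */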
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) static int writecache_flush_thread(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct dm_writecache *wc = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) wc_lock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) bio = bio_list_pop(&wc->flush_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (!bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (unlikely(kthread_should_stop())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (bio_op(bio) == REQ_OP_DISCARD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) writecache_discard(wc, bio->bi_iter.bi_sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) bio_end_sector(bio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) bio_set_dev(bio, wc->dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) submit_bio_noacct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) writecache_flush(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (writecache_has_error(wc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) bio->bi_status = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) bio_endio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (bio_list_empty(&wc->flush_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) wake_up_process(wc->flush_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) bio_list_add(&wc->flush_list, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) static int writecache_map(struct dm_target *ti, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) struct wc_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) struct dm_writecache *wc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) bio->bi_private = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) wc_lock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
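/*
 * REQ_PREFLUSH: in persistent-memory mode the cache can be committed
 * right here under the lock; in SSD mode the bio is handed to the flush
 * thread, and a flush bio with a non-zero target_bio_nr is simply
 * remapped to the origin device.
 */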
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (writecache_has_error(wc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) goto unlock_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (WC_MODE_PMEM(wc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) writecache_flush(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (writecache_has_error(wc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) goto unlock_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (unlikely(wc->cleaner))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) goto unlock_remap_origin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) goto unlock_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (dm_bio_get_target_bio_nr(bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) goto unlock_remap_origin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) writecache_offload_bio(wc, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) goto unlock_return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) (wc->block_size / 512 - 1)) != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) (unsigned long long)bio->bi_iter.bi_sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) bio->bi_iter.bi_size, wc->block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) goto unlock_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (writecache_has_error(wc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) goto unlock_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (WC_MODE_PMEM(wc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) goto unlock_remap_origin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) writecache_offload_bio(wc, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) goto unlock_return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
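/*
 * Reads: cache hits are served from the cache (copied straight out of
 * persistent memory, or remapped to the SSD); everything else is passed
 * through to the origin device, split at the next cached block so that
 * the following blocks can be looked up again.
 */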
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (bio_data_dir(bio) == READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) read_next_block:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (WC_MODE_PMEM(wc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) bio_copy_block(wc, bio, memory_data(wc, e));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (bio->bi_iter.bi_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) goto read_next_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) goto unlock_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) bio_set_dev(bio, wc->ssd_dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) bio->bi_iter.bi_sector = cache_sector(wc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (!writecache_entry_is_committed(wc, e))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) writecache_wait_for_ios(wc, WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) goto unlock_remap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) sector_t next_boundary =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) read_original_sector(wc, e) - bio->bi_iter.bi_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) dm_accept_partial_bio(bio, next_boundary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) goto unlock_remap_origin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) bool found_entry = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) bool search_used = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) if (writecache_has_error(wc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) goto unlock_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (!writecache_entry_is_committed(wc, e)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) search_used = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) goto bio_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) wc->overwrote_committed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) search_used = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) goto bio_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) found_entry = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (unlikely(wc->cleaner))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) goto direct_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) e = writecache_pop_from_freelist(wc, (sector_t)-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (unlikely(!e)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) if (!WC_MODE_PMEM(wc) && !found_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) direct_write:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) sector_t next_boundary = read_original_sector(wc, e) - bio->bi_iter.bi_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) BUG_ON(!next_boundary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) dm_accept_partial_bio(bio, next_boundary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) goto unlock_remap_origin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) writecache_wait_on_freelist(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) writecache_insert_entry(wc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) wc->uncommitted_blocks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) bio_copy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if (WC_MODE_PMEM(wc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) bio_copy_block(wc, bio, memory_data(wc, e));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) unsigned bio_size = wc->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) sector_t start_cache_sec = cache_sector(wc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
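/*
 * Try to extend the write over additional blocks that are contiguous
 * both on the origin device and in the cache, so one large bio can be
 * submitted to the SSD: either take free entries at the next cache
 * sector, or, when overwriting, follow already-allocated neighbours.
 */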
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) while (bio_size < bio->bi_iter.bi_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (!search_used) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (!f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) (bio_size >> SECTOR_SHIFT), wc->seq_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) writecache_insert_entry(wc, f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) wc->uncommitted_blocks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) struct wc_entry *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) struct rb_node *next = rb_next(&e->rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (!next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) f = container_of(next, struct wc_entry, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (f != e + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (read_original_sector(wc, f) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (unlikely(f->write_in_progress))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (writecache_entry_is_committed(wc, f))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) wc->overwrote_committed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) e = f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) bio_size += wc->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) current_cache_sec += wc->block_size >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) bio_set_dev(bio, wc->ssd_dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) bio->bi_iter.bi_sector = start_cache_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) wc->uncommitted_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) queue_work(wc->writeback_wq, &wc->flush_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) writecache_schedule_autocommit(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) goto unlock_remap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) } while (bio->bi_iter.bi_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) if (unlikely(bio->bi_opf & REQ_FUA ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) wc->uncommitted_blocks >= wc->autocommit_blocks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) writecache_flush(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) writecache_schedule_autocommit(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) goto unlock_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) unlock_remap_origin:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) bio_set_dev(bio, wc->dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) return DM_MAPIO_REMAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) unlock_remap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) /* make sure that writecache_end_io decrements bio_in_progress: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) bio->bi_private = (void *)1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) return DM_MAPIO_REMAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) unlock_submit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) bio_endio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) return DM_MAPIO_SUBMITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) unlock_return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) return DM_MAPIO_SUBMITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) unlock_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) bio_io_error(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) return DM_MAPIO_SUBMITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) struct dm_writecache *wc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (bio->bi_private != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) int dir = bio_data_dir(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) wake_up(&wc->bio_in_progress_wait[dir]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) static int writecache_iterate_devices(struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) iterate_devices_callout_fn fn, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) struct dm_writecache *wc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) return fn(ti, wc->dev, 0, ti->len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) struct dm_writecache *wc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (limits->logical_block_size < wc->block_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) limits->logical_block_size = wc->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (limits->physical_block_size < wc->block_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) limits->physical_block_size = wc->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (limits->io_min < wc->block_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) limits->io_min = wc->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
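/*
 * Writeback completions (bio completion for pmem mode, copy completion
 * for SSD mode) only queue the finished item on the endio list and wake
 * the endio thread; the entries are freed there, in process context.
 */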
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) static void writecache_writeback_endio(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) struct dm_writecache *wc = wb->wc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (unlikely(list_empty(&wc->endio_list)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) wake_up_process(wc->endio_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) list_add_tail(&wb->endio_entry, &wc->endio_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct copy_struct *c = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) struct dm_writecache *wc = c->wc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) c->error = likely(!(read_err | write_err)) ? 0 : -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) raw_spin_lock_irq(&wc->endio_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (unlikely(list_empty(&wc->endio_list)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) wake_up_process(wc->endio_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) list_add_tail(&c->endio_entry, &wc->endio_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) raw_spin_unlock_irq(&wc->endio_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) struct writeback_struct *wb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) struct wc_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) unsigned long n_walked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) wb = list_entry(list->next, struct writeback_struct, endio_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) list_del(&wb->endio_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (unlikely(wb->bio.bi_status != BLK_STS_OK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) "write error %d", wb->bio.bi_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) e = wb->wc_list[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) BUG_ON(!e->write_in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) e->write_in_progress = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) INIT_LIST_HEAD(&e->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) if (!writecache_has_error(wc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) writecache_free_entry(wc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) BUG_ON(!wc->writeback_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) wc->writeback_size--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) n_walked++;
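/*
 * Periodically commit what has been freed so far and briefly drop
 * wc->lock, so that completing a long list does not block
 * writecache_map() for too long.
 */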
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (unlikely(n_walked >= ENDIO_LATENCY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) writecache_commit_flushed(wc, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) wc_unlock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) wc_lock(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) n_walked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) } while (++i < wb->wc_list_n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (wb->wc_list != wb->wc_list_inline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) kfree(wb->wc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) bio_put(&wb->bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) } while (!list_empty(list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) struct copy_struct *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) struct wc_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) c = list_entry(list->next, struct copy_struct, endio_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) list_del(&c->endio_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) if (unlikely(c->error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) writecache_error(wc, c->error, "copy error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) e = c->e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) BUG_ON(!e->write_in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) e->write_in_progress = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) INIT_LIST_HEAD(&e->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (!writecache_has_error(wc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) writecache_free_entry(wc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) BUG_ON(!wc->writeback_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) wc->writeback_size--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) e++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) } while (--c->n_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) mempool_free(c, &wc->copy_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) } while (!list_empty(list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
static int writecache_endio_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct list_head list;

		raw_spin_lock_irq(&wc->endio_list_lock);
		if (!list_empty(&wc->endio_list))
			goto pop_from_list;
		set_current_state(TASK_INTERRUPTIBLE);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}

		schedule();

		continue;

pop_from_list:
		list = wc->endio_list;
		list.next->prev = list.prev->next = &list;
		INIT_LIST_HEAD(&wc->endio_list);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (!WC_MODE_FUA(wc))
			writecache_disk_flush(wc, wc->dev);

		wc_lock(wc);

		if (WC_MODE_PMEM(wc)) {
			__writecache_endio_pmem(wc, &list);
		} else {
			__writecache_endio_ssd(wc, &list);
			writecache_wait_for_ios(wc, READ);
		}

		writecache_commit_flushed(wc, false);

		wc_unlock(wc);
	}

	return 0;
}

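/*
 * Append one cached block (backed by persistent memory) to the writeback bio.
 * The CPU cache is flushed first so that the bio reads the committed data.
 * A block that would land past the end of the origin device is reported as
 * added but gets no page; such bios end up with zero sectors and are
 * completed without issuing any I/O.
 */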
static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
{
	struct dm_writecache *wc = wb->wc;
	unsigned block_size = wc->block_size;
	void *address = memory_data(wc, e);

	persistent_memory_flush_cache(address, block_size);

	if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
		return true;

	return bio_add_page(&wb->bio, persistent_memory_page(address),
			    block_size, persistent_memory_page_offset(address)) != 0;
}

struct writeback_list {
	struct list_head list;
	size_t size;
};

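/*
 * If the user configured max_writeback_jobs, block until the number of
 * in-flight writeback blocks (excluding the ones still queued locally on
 * wbl) drops below the limit. Always gives up the CPU between batches.
 */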
static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
{
	if (unlikely(wc->max_writeback_jobs)) {
		if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
			wc_lock(wc);
			while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
				writecache_wait_on_freelist(wc);
			wc_unlock(wc);
		}
	}
	cond_resched();
}

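/*
 * PMEM-mode writeback: take entries from the tail of the writeback list,
 * coalesce runs whose original sectors are adjacent into a single bio and
 * submit it to the origin device, optionally with REQ_FUA.
 */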
static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct bio *bio;
	struct writeback_struct *wb;
	unsigned max_pages;

	while (wbl->size) {
		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		max_pages = e->wc_list_contiguous;

		bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
		wb = container_of(bio, struct writeback_struct, bio);
		wb->wc = wc;
		bio->bi_end_io = writecache_writeback_endio;
		bio_set_dev(bio, wc->dev->bdev);
		bio->bi_iter.bi_sector = read_original_sector(wc, e);
		if (max_pages <= WB_LIST_INLINE ||
		    unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
							   GFP_NOIO | __GFP_NORETRY |
							   __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			wb->wc_list = wb->wc_list_inline;
			max_pages = WB_LIST_INLINE;
		}

		BUG_ON(!wc_add_block(wb, e, GFP_NOIO));

		wb->wc_list[0] = e;
		wb->wc_list_n = 1;

		while (wbl->size && wb->wc_list_n < max_pages) {
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			if (read_original_sector(wc, f) !=
			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (!wc_add_block(wb, f, GFP_NOWAIT | __GFP_NOWARN))
				break;
			wbl->size--;
			list_del(&f->lru);
			wb->wc_list[wb->wc_list_n++] = f;
			e = f;
		}
		bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
		if (writecache_has_error(wc)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else if (unlikely(!bio_sectors(bio))) {
			bio->bi_status = BLK_STS_OK;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}

		__writeback_throttle(wc, wbl);
	}
}

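/*
 * SSD-mode writeback: the same coalescing as the PMEM path, but the data
 * lives on the cache block device, so each contiguous run is handed to
 * dm-kcopyd as one copy from the cache device to the origin device.
 */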
static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct dm_io_region from, to;
	struct copy_struct *c;

	while (wbl->size) {
		unsigned n_sectors;

		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);

		from.bdev = wc->ssd_dev->bdev;
		from.sector = cache_sector(wc, e);
		from.count = n_sectors;
		to.bdev = wc->dev->bdev;
		to.sector = read_original_sector(wc, e);
		to.count = n_sectors;

		c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
		c->wc = wc;
		c->e = e;
		c->n_entries = e->wc_list_contiguous;

		while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
			wbl->size--;
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			BUG_ON(f != e + 1);
			list_del(&f->lru);
			e = f;
		}

		if (unlikely(to.sector + to.count > wc->data_device_sectors)) {
			if (to.sector >= wc->data_device_sectors) {
				writecache_copy_endio(0, 0, c);
				continue;
			}
			from.count = to.count = wc->data_device_sectors - to.sector;
		}

		dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);

		__writeback_throttle(wc, wbl);
	}
}

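/*
 * The writeback work function. Walks the LRU (or the whole tree when
 * writeback_all is set) while the freelist is below the low watermark or
 * entries have exceeded max_age, marks eligible committed entries as
 * write_in_progress, groups them into runs of contiguous original sectors
 * and dispatches the runs to the mode-specific routine outside of wc->lock.
 */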
static void writecache_writeback(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
	struct blk_plug plug;
	struct wc_entry *f, *g, *e = NULL;
	struct rb_node *node, *next_node;
	struct list_head skipped;
	struct writeback_list wbl;
	unsigned long n_walked;

	wc_lock(wc);
restart:
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return;
	}

	if (unlikely(wc->writeback_all)) {
		if (writecache_wait_for_writeback(wc))
			goto restart;
	}

	if (wc->overwrote_committed) {
		writecache_wait_for_ios(wc, WRITE);
	}

	n_walked = 0;
	INIT_LIST_HEAD(&skipped);
	INIT_LIST_HEAD(&wbl.list);
	wbl.size = 0;
	while (!list_empty(&wc->lru) &&
	       (wc->writeback_all ||
		wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark ||
		(jiffies - container_of(wc->lru.prev, struct wc_entry, lru)->age >=
		 wc->max_age - wc->max_age / MAX_AGE_DIV))) {

		n_walked++;
		if (unlikely(n_walked > WRITEBACK_LATENCY) &&
		    likely(!wc->writeback_all) && likely(!dm_suspended(wc->ti))) {
			queue_work(wc->writeback_wq, &wc->writeback_work);
			break;
		}

		if (unlikely(wc->writeback_all)) {
			if (unlikely(!e)) {
				writecache_flush(wc);
				e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
			} else
				e = g;
		} else
			e = container_of(wc->lru.prev, struct wc_entry, lru);
		BUG_ON(e->write_in_progress);
		if (unlikely(!writecache_entry_is_committed(wc, e))) {
			writecache_flush(wc);
		}
		node = rb_prev(&e->rb_node);
		if (node) {
			f = container_of(node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, f) ==
				     read_original_sector(wc, e))) {
				BUG_ON(!f->write_in_progress);
				list_del(&e->lru);
				list_add(&e->lru, &skipped);
				cond_resched();
				continue;
			}
		}
		wc->writeback_size++;
		list_del(&e->lru);
		list_add(&e->lru, &wbl.list);
		wbl.size++;
		e->write_in_progress = true;
		e->wc_list_contiguous = 1;

		f = e;

		while (1) {
			next_node = rb_next(&f->rb_node);
			if (unlikely(!next_node))
				break;
			g = container_of(next_node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, g) ==
				     read_original_sector(wc, f))) {
				f = g;
				continue;
			}
			if (read_original_sector(wc, g) !=
			    read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (unlikely(g->write_in_progress))
				break;
			if (unlikely(!writecache_entry_is_committed(wc, g)))
				break;

			if (!WC_MODE_PMEM(wc)) {
				if (g != f + 1)
					break;
			}

			n_walked++;
			//if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
			//	break;

			wc->writeback_size++;
			list_del(&g->lru);
			list_add(&g->lru, &wbl.list);
			wbl.size++;
			g->write_in_progress = true;
			g->wc_list_contiguous = BIO_MAX_PAGES;
			f = g;
			e->wc_list_contiguous++;
			if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) {
				if (unlikely(wc->writeback_all)) {
					next_node = rb_next(&f->rb_node);
					if (likely(next_node))
						g = container_of(next_node, struct wc_entry, rb_node);
				}
				break;
			}
		}
		cond_resched();
	}

	if (!list_empty(&skipped)) {
		list_splice_tail(&skipped, &wc->lru);
		/*
		 * If we didn't make any progress, we must wait until some
		 * writeback finishes to avoid burning CPU in a loop
		 */
		if (unlikely(!wbl.size))
			writecache_wait_for_writeback(wc);
	}

	wc_unlock(wc);

	blk_start_plug(&plug);

	if (WC_MODE_PMEM(wc))
		__writecache_writeback_pmem(wc, &wbl);
	else
		__writecache_writeback_ssd(wc, &wbl);

	blk_finish_plug(&plug);

	if (unlikely(wc->writeback_all)) {
		wc_lock(wc);
		while (writecache_wait_for_writeback(wc));
		wc_unlock(wc);
	}
}

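/*
 * Split the cache device into metadata (superblock plus one wc_memory_entry
 * per block) and data blocks. The first estimate assumes every block costs
 * block_size plus one entry; the loop then shrinks n_blocks until the
 * metadata area, rounded up to a whole number of blocks, plus the data
 * area fits in device_size. Rough example (assuming a 16-byte
 * wc_memory_entry): a 1 GiB device with 4096-byte blocks yields about
 * 1 GiB / 4112 = ~261000 cache blocks and roughly 4 MiB of metadata.
 */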
static int calculate_memory_size(uint64_t device_size, unsigned block_size,
				 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
{
	uint64_t n_blocks, offset;
	struct wc_entry e;

	n_blocks = device_size;
	do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));

	while (1) {
		if (!n_blocks)
			return -ENOSPC;
		/* Verify that computing the offset of entries[n_blocks] below won't overflow */
		if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
				 sizeof(struct wc_memory_entry)))
			return -EFBIG;
		offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
		offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
		if (offset + n_blocks * block_size <= device_size)
			break;
		n_blocks--;
	}

	/* check that n_blocks fits in the index bit field */
	e.index = n_blocks;
	if (e.index != n_blocks)
		return -EFBIG;

	if (n_blocks_p)
		*n_blocks_p = n_blocks;
	if (n_metadata_blocks_p)
		*n_metadata_blocks_p = offset >> __ffs(block_size);
	return 0;
}

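/*
 * Format a fresh cache: write the superblock fields and invalidate every
 * entry, commit all of that, and only then write the magic number. Writing
 * the magic last means a device that lost power mid-format is never
 * mistaken for a valid, fully initialized cache.
 */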
static int init_memory(struct dm_writecache *wc)
{
	size_t b;
	int r;

	r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
	if (r)
		return r;

	r = writecache_alloc_entries(wc);
	if (r)
		return r;

	for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
		pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
	pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
	pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
	pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));

	for (b = 0; b < wc->n_blocks; b++) {
		write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
		cond_resched();
	}

	writecache_flush_all_metadata(wc);
	writecache_commit_flushed(wc, false);
	pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
	writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
	writecache_commit_flushed(wc, false);

	return 0;
}

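/*
 * Destructor. Also runs on the error path of the constructor, so every
 * teardown step is guarded by a check that the corresponding resource was
 * actually set up.
 */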
static void writecache_dtr(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;

	if (!wc)
		return;

	if (wc->endio_thread)
		kthread_stop(wc->endio_thread);

	if (wc->flush_thread)
		kthread_stop(wc->flush_thread);

	bioset_exit(&wc->bio_set);

	mempool_exit(&wc->copy_pool);

	if (wc->writeback_wq)
		destroy_workqueue(wc->writeback_wq);

	if (wc->dev)
		dm_put_device(ti, wc->dev);

	if (wc->ssd_dev)
		dm_put_device(ti, wc->ssd_dev);

	if (wc->entries)
		vfree(wc->entries);

	if (wc->memory_map) {
		if (WC_MODE_PMEM(wc))
			persistent_memory_release(wc);
		else
			vfree(wc->memory_map);
	}

	if (wc->dm_kcopyd)
		dm_kcopyd_client_destroy(wc->dm_kcopyd);

	if (wc->dm_io)
		dm_io_client_destroy(wc->dm_io);

	if (wc->dirty_bitmap)
		vfree(wc->dirty_bitmap);

	kfree(wc);
}

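/*
 * Constructor. The table line, as parsed below, is:
 *
 *   writecache <p|s> <origin dev> <cache dev> <block size> <#opt args> <args...>
 *
 * e.g. (illustrative device paths and sizes):
 *
 *   dmsetup create wc --table "0 409600 writecache s /dev/vdb /dev/vdc 4096 \
 *       4 high_watermark 60 low_watermark 40"
 */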
static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_writecache *wc;
	struct dm_arg_set as;
	const char *string;
	unsigned opt_params;
	size_t offset, data_size;
	int i, r;
	char dummy;
	int high_wm_percent = HIGH_WATERMARK;
	int low_wm_percent = LOW_WATERMARK;
	uint64_t x;
	struct wc_memory_superblock s;

	static struct dm_arg _args[] = {
		{0, 16, "Invalid number of feature args"},
	};

	as.argc = argc;
	as.argv = argv;

	wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
	if (!wc) {
		ti->error = "Cannot allocate writecache structure";
		r = -ENOMEM;
		goto bad;
	}
	ti->private = wc;
	wc->ti = ti;

	mutex_init(&wc->lock);
	wc->max_age = MAX_AGE_UNSPECIFIED;
	writecache_poison_lists(wc);
	init_waitqueue_head(&wc->freelist_wait);
	timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
	timer_setup(&wc->max_age_timer, writecache_max_age_timer, 0);

	for (i = 0; i < 2; i++) {
		atomic_set(&wc->bio_in_progress[i], 0);
		init_waitqueue_head(&wc->bio_in_progress_wait[i]);
	}

	wc->dm_io = dm_io_client_create();
	if (IS_ERR(wc->dm_io)) {
		r = PTR_ERR(wc->dm_io);
		ti->error = "Unable to allocate dm-io client";
		wc->dm_io = NULL;
		goto bad;
	}

	wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
	if (!wc->writeback_wq) {
		r = -ENOMEM;
		ti->error = "Could not allocate writeback workqueue";
		goto bad;
	}
	INIT_WORK(&wc->writeback_work, writecache_writeback);
	INIT_WORK(&wc->flush_work, writecache_flush_work);

	raw_spin_lock_init(&wc->endio_list_lock);
	INIT_LIST_HEAD(&wc->endio_list);
	wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
	if (IS_ERR(wc->endio_thread)) {
		r = PTR_ERR(wc->endio_thread);
		wc->endio_thread = NULL;
		ti->error = "Couldn't spawn endio thread";
		goto bad;
	}
	wake_up_process(wc->endio_thread);

	/*
	 * Parse the mode (pmem or ssd)
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;

	if (!strcasecmp(string, "s")) {
		wc->pmem_mode = false;
	} else if (!strcasecmp(string, "p")) {
#ifdef DM_WRITECACHE_HAS_PMEM
		wc->pmem_mode = true;
		wc->writeback_fua = true;
#else
		/*
		 * If the architecture doesn't support persistent memory or
		 * the kernel doesn't support any DAX drivers, this driver can
		 * only be used in SSD-only mode.
		 */
		r = -EOPNOTSUPP;
		ti->error = "Persistent memory or DAX not supported on this system";
		goto bad;
#endif
	} else {
		goto bad_arguments;
	}

	if (WC_MODE_PMEM(wc)) {
		r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
				offsetof(struct writeback_struct, bio),
				BIOSET_NEED_BVECS);
		if (r) {
			ti->error = "Could not allocate bio set";
			goto bad;
		}
	} else {
		r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
		if (r) {
			ti->error = "Could not allocate mempool";
			goto bad;
		}
	}

	/*
	 * Parse the origin data device
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;
	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
	if (r) {
		ti->error = "Origin data device lookup failed";
		goto bad;
	}

	/*
	 * Parse cache data device (be it pmem or ssd)
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;

	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
	if (r) {
		ti->error = "Cache data device lookup failed";
		goto bad;
	}
	wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);

	/*
	 * Parse the cache block size
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;
	if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
	    wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
	    (wc->block_size & (wc->block_size - 1))) {
		r = -EINVAL;
		ti->error = "Invalid block size";
		goto bad;
	}
	if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
	    wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
		r = -EINVAL;
		ti->error = "Block size is smaller than device logical block size";
		goto bad;
	}
	wc->block_size_bits = __ffs(wc->block_size);

	wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
	wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
	wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);

	/*
	 * Parse optional arguments
	 */
	r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (r)
		goto bad;

	while (opt_params) {
		string = dm_shift_arg(&as), opt_params--;
		if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
			unsigned long long start_sector;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
				goto invalid_optional;
			wc->start_sector = start_sector;
			wc->start_sector_set = true;
			if (wc->start_sector != start_sector ||
			    wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
				goto invalid_optional;
		} else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (high_wm_percent < 0 || high_wm_percent > 100)
				goto invalid_optional;
			wc->high_wm_percent_value = high_wm_percent;
			wc->high_wm_percent_set = true;
		} else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (low_wm_percent < 0 || low_wm_percent > 100)
				goto invalid_optional;
			wc->low_wm_percent_value = low_wm_percent;
			wc->low_wm_percent_set = true;
		} else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
				goto invalid_optional;
			wc->max_writeback_jobs_set = true;
		} else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
				goto invalid_optional;
			wc->autocommit_blocks_set = true;
		} else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
			unsigned autocommit_msecs;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
				goto invalid_optional;
			if (autocommit_msecs > 3600000)
				goto invalid_optional;
			wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
			wc->autocommit_time_value = autocommit_msecs;
			wc->autocommit_time_set = true;
		} else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
			unsigned max_age_msecs;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
				goto invalid_optional;
			if (max_age_msecs > 86400000)
				goto invalid_optional;
			wc->max_age = msecs_to_jiffies(max_age_msecs);
			wc->max_age_set = true;
			wc->max_age_value = max_age_msecs;
		} else if (!strcasecmp(string, "cleaner")) {
			wc->cleaner_set = true;
			wc->cleaner = true;
		} else if (!strcasecmp(string, "fua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = true;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else if (!strcasecmp(string, "nofua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = false;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else {
invalid_optional:
			r = -EINVAL;
			ti->error = "Invalid optional argument";
			goto bad;
		}
	}

	if (high_wm_percent < low_wm_percent) {
		r = -EINVAL;
		ti->error = "High watermark must be greater than or equal to low watermark";
		goto bad;
	}

	if (WC_MODE_PMEM(wc)) {
		if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
			r = -EOPNOTSUPP;
			ti->error = "Asynchronous persistent memory not supported as pmem cache";
			goto bad;
		}

		r = persistent_memory_claim(wc);
		if (r) {
			ti->error = "Unable to map persistent memory for cache";
			goto bad;
		}
	} else {
		size_t n_blocks, n_metadata_blocks;
		uint64_t n_bitmap_bits;

		wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;

		bio_list_init(&wc->flush_list);
		wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
		if (IS_ERR(wc->flush_thread)) {
			r = PTR_ERR(wc->flush_thread);
			wc->flush_thread = NULL;
			ti->error = "Couldn't spawn flush thread";
			goto bad;
		}
		wake_up_process(wc->flush_thread);

		r = calculate_memory_size(wc->memory_map_size, wc->block_size,
					  &n_blocks, &n_metadata_blocks);
		if (r) {
			ti->error = "Invalid device size";
			goto bad;
		}

		n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) +
				 BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY;
		/* this is a limitation of the test_bit functions */
		if (n_bitmap_bits > 1U << 31) {
			r = -EFBIG;
			ti->error = "Invalid device size";
			goto bad;
		}

		wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits);
		if (!wc->memory_map) {
			r = -ENOMEM;
			ti->error = "Unable to allocate memory for metadata";
			goto bad;
		}

		wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle);
		if (IS_ERR(wc->dm_kcopyd)) {
			r = PTR_ERR(wc->dm_kcopyd);
			ti->error = "Unable to allocate dm-kcopyd client";
			wc->dm_kcopyd = NULL;
			goto bad;
		}

		wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT);
		wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) /
			BITS_PER_LONG * sizeof(unsigned long);
		wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size);
		if (!wc->dirty_bitmap) {
			r = -ENOMEM;
			ti->error = "Unable to allocate dirty bitmap";
			goto bad;
		}

		r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
		if (r) {
			ti->error = "Unable to read first block of metadata";
			goto bad;
		}
	}

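	/*
	 * Read the superblock through a machine-check-safe copy so that a
	 * hardware memory error in pmem fails the constructor instead of
	 * crashing. A superblock with both magic and version zero denotes a
	 * never-initialized cache and triggers formatting via init_memory().
	 */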
	r = copy_mc_to_kernel(&s, sb(wc), sizeof(struct wc_memory_superblock));
	if (r) {
		ti->error = "Hardware memory error when reading superblock";
		goto bad;
	}
	if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) {
		r = init_memory(wc);
		if (r) {
			ti->error = "Unable to initialize device";
			goto bad;
		}
		r = copy_mc_to_kernel(&s, sb(wc),
				      sizeof(struct wc_memory_superblock));
		if (r) {
			ti->error = "Hardware memory error when reading superblock";
			goto bad;
		}
	}

	if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) {
		ti->error = "Invalid magic in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) {
		ti->error = "Invalid version in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.block_size) != wc->block_size) {
		ti->error = "Block size does not match superblock";
		r = -EINVAL;
		goto bad;
	}

	wc->n_blocks = le64_to_cpu(s.n_blocks);

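	/*
	 * Compute the metadata size: the superblock plus one wc_memory_entry
	 * per block, rounded up to the block size.  n_blocks comes from the
	 * on-media superblock and is untrusted, so every multiplication and
	 * addition is checked for overflow by dividing back or testing for
	 * wraparound.
	 */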
	offset = wc->n_blocks * sizeof(struct wc_memory_entry);
	if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
overflow:
		ti->error = "Overflow in size calculation";
		r = -EINVAL;
		goto bad;
	}
	offset += sizeof(struct wc_memory_superblock);
	if (offset < sizeof(struct wc_memory_superblock))
		goto overflow;
	offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1);
	data_size = wc->n_blocks * (size_t)wc->block_size;
	if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
	    (offset + data_size < offset))
		goto overflow;
	if (offset + data_size > wc->memory_map_size) {
		ti->error = "Memory area is too small";
		r = -EINVAL;
		goto bad;
	}

	wc->metadata_sectors = offset >> SECTOR_SHIFT;
	wc->block_start = (char *)sb(wc) + offset;

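	/*
	 * The watermarks are configured as a percentage of dirty blocks but
	 * tracked as a count of free blocks, hence (100 - percent); the +50
	 * rounds the division to the nearest block.  For example, with
	 * n_blocks = 1000 and high_wm_percent = 50, freelist_high_watermark
	 * becomes 500: writeback is triggered once the free list shrinks to
	 * that level.
	 */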
	x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_high_watermark = x;
	x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_low_watermark = x;

	if (wc->cleaner)
		activate_cleaner(wc);

	r = writecache_alloc_entries(wc);
	if (r) {
		ti->error = "Cannot allocate memory";
		goto bad;
	}

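	/*
	 * SSD mode requests two flush bios per flush so that the cache can
	 * be committed and the origin device flushed separately; PMEM mode
	 * commits from the map path and needs only one.
	 */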
	ti->num_flush_bios = WC_MODE_PMEM(wc) ? 1 : 2;
	ti->flush_supported = true;
	ti->num_discard_bios = 1;

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	return 0;

bad_arguments:
	r = -EINVAL;
	ti->error = "Bad arguments";
bad:
	writecache_dtr(ti);
	return r;
}

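/*
 * STATUSTYPE_INFO reports "<error> <n_blocks> <freelist_size>
 * <writeback_size>"; STATUSTYPE_TABLE re-emits the constructor line,
 * including only the optional arguments that were explicitly set.
 */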
static void writecache_status(struct dm_target *ti, status_type_t type,
			      unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_writecache *wc = ti->private;
	unsigned extra_args;
	unsigned sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc),
		       (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
		       (unsigned long long)wc->writeback_size);
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
		       wc->dev->name, wc->ssd_dev->name, wc->block_size);
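		/* Keyword/value options count as two arguments, bare flags as one. */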
		extra_args = 0;
		if (wc->start_sector_set)
			extra_args += 2;
		if (wc->high_wm_percent_set)
			extra_args += 2;
		if (wc->low_wm_percent_set)
			extra_args += 2;
		if (wc->max_writeback_jobs_set)
			extra_args += 2;
		if (wc->autocommit_blocks_set)
			extra_args += 2;
		if (wc->autocommit_time_set)
			extra_args += 2;
		if (wc->max_age_set)
			extra_args += 2;
		if (wc->cleaner_set)
			extra_args++;
		if (wc->writeback_fua_set)
			extra_args++;

		DMEMIT("%u", extra_args);
		if (wc->start_sector_set)
			DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
		if (wc->high_wm_percent_set)
			DMEMIT(" high_watermark %u", wc->high_wm_percent_value);
		if (wc->low_wm_percent_set)
			DMEMIT(" low_watermark %u", wc->low_wm_percent_value);
		if (wc->max_writeback_jobs_set)
			DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
		if (wc->autocommit_blocks_set)
			DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
		if (wc->autocommit_time_set)
			DMEMIT(" autocommit_time %u", wc->autocommit_time_value);
		if (wc->max_age_set)
			DMEMIT(" max_age %u", wc->max_age_value);
		if (wc->cleaner_set)
			DMEMIT(" cleaner");
		if (wc->writeback_fua_set)
			DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
		break;
	}
}

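/*
 * Illustrative use (device names and sizes are examples only): create an
 * SSD-backed writecache over /dev/vg/origin with /dev/vg/fast as the
 * cache, 4096-byte blocks, and two optional arguments:
 *
 *   dmsetup create wc --table "0 <origin sectors> writecache s \
 *       /dev/vg/origin /dev/vg/fast 4096 2 high_watermark 60"
 *
 * The table line mirrors what STATUSTYPE_TABLE above re-emits.
 */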
static struct target_type writecache_target = {
	.name			= "writecache",
	.version		= {1, 4, 0},
	.module			= THIS_MODULE,
	.ctr			= writecache_ctr,
	.dtr			= writecache_dtr,
	.status			= writecache_status,
	.postsuspend		= writecache_suspend,
	.resume			= writecache_resume,
	.message		= writecache_message,
	.map			= writecache_map,
	.end_io			= writecache_end_io,
	.iterate_devices	= writecache_iterate_devices,
	.io_hints		= writecache_io_hints,
};

static int __init dm_writecache_init(void)
{
	int r;

	r = dm_register_target(&writecache_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		return r;
	}

	return 0;
}

static void __exit dm_writecache_exit(void)
{
	dm_unregister_target(&writecache_target);
}

module_init(dm_writecache_init);
module_exit(dm_writecache_exit);

MODULE_DESCRIPTION(DM_NAME " writecache target");
MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");