/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include <linux/dm-bufio.h>

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when the number of dirty buffers exceeds
 *	DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
 */
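/*
 * Illustrative arithmetic, not taken from this file: on a machine with
 * 8 GiB of main memory and 512 MiB of vmalloc space, the default limit
 * would be min(2% of 8 GiB, 25% of 512 MiB) = min(~164 MiB, 128 MiB)
 * = 128 MiB of cached data.
 */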
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_RATIO	3
#define DM_BUFIO_LOW_WATERMARK_RATIO	16

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	300

/*
 * The number of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)

/*
 * Align buffer writes to this boundary.
 * Tests show that SSDs have the highest IOPS when using 4k writes.
 */
#define DM_BUFIO_WRITE_ALIGN		4096
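/*
 * A worked example of the alignment arithmetic used in submit_io()
 * below (illustrative numbers, not from the original source): with a
 * dirty byte range of [5000, 6000), "offset &= -4096" rounds the start
 * down to 4096 and "end = (end + 4095) & -4096" rounds the end up to
 * 8192, so the write covers the whole aligned window [4096, 8192).
 */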

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to buffer_tree with their node field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too. They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	s8 sectors_per_block_bits;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct kmem_cache *slab_buffer;
	struct kmem_cache *slab_cache;
	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	unsigned minimum_buffers;

	struct rb_root buffer_tree;
	wait_queue_head_t free_buffer_wait;

	sector_t start;

	int async_write_error;

	struct list_head client_list;

	struct shrinker shrinker;
	struct work_struct shrink_work;
	atomic_long_t need_shrink;
};

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct rb_node node;
	struct list_head lru_list;
	struct list_head global_list;
	sector_t block;
	void *data;
	unsigned char data_mode;	/* DATA_MODE_* */
	unsigned char list_mode;	/* LIST_* */
	blk_status_t read_error;
	blk_status_t write_error;
	unsigned accessed;
	unsigned hold_count;
	unsigned long state;
	unsigned long last_accessed;
	unsigned dirty_start;
	unsigned dirty_end;
	unsigned write_start;
	unsigned write_end;
	struct dm_bufio_client *c;
	struct list_head write_list;
	void (*end_io)(struct dm_buffer *, blk_status_t);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	unsigned int stack_len;
	unsigned long stack_entries[MAX_STACK];
#endif
};

/*----------------------------------------------------------------*/

#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}
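/*
 * Note (added commentary): dm_bufio_in_request() is true when we are
 * called from inside bio submission (current->bio_list is set). Passing
 * it as the lockdep subclass above lets a bufio client be locked from
 * within another client's I/O path without a false lockdep warning.
 */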

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time. If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(global_spinlock);

static LIST_HEAD(global_queue);

static unsigned long global_num;

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_cleanup_old_work;
static struct work_struct dm_bufio_replacement_work;

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif

/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = block < b->block ? n->rb_left : n->rb_right;
	}

	return NULL;
}
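/*
 * Note (added commentary): unlike __find(), __find_next() below falls
 * back to the successor: if no buffer for 'block' exists, it returns
 * the buffer with the smallest block number greater than 'block', or
 * NULL if there is none. Both walks assume the caller holds c->lock.
 */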

static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;
	struct dm_buffer *best = NULL;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		if (block <= b->block) {
			n = n->rb_left;
			best = b;
		} else {
			n = n->rb_right;
		}
	}

	return best;
}

static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block) {
			BUG_ON(found != b);
			return;
		}

		parent = *new;
		new = b->block < found->block ?
			&found->node.rb_left : &found->node.rb_right;
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
	rb_erase(&b->node, &c->buffer_tree);
}

/*----------------------------------------------------------------*/

static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
{
	unsigned char data_mode;
	long diff;

	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	data_mode = b->data_mode;
	diff = (long)b->c->block_size;
	if (unlink)
		diff = -diff;

	spin_lock(&global_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	b->accessed = 1;

	if (!unlink) {
		list_add(&b->global_list, &global_queue);
		global_num++;
		if (dm_bufio_current_allocated > dm_bufio_cache_size)
			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
	} else {
		list_del(&b->global_list);
		global_num--;
	}

	spin_unlock(&global_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       unsigned char *data_mode)
{
	if (unlikely(c->slab_cache != NULL)) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(c->slab_cache, gfp_mask);
	}

	if (c->block_size <= KMALLOC_MAX_SIZE &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
				c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */
	if (gfp_mask & __GFP_NORETRY) {
		unsigned noio_flag = memalloc_noio_save();
		void *ptr = __vmalloc(c->block_size, gfp_mask);

		memalloc_noio_restore(noio_flag);
		return ptr;
	}

	return __vmalloc(c->block_size, gfp_mask);
}
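/*
 * Illustrative outcomes of the selection above (assumed configurations,
 * not from the original source): a client with a 512-byte block size
 * has a slab cache, so data comes from kmem_cache_alloc(); a 64 KiB
 * block requested with __GFP_NORETRY comes from __get_free_pages(); the
 * same block requested without __GFP_NORETRY (the reserve allocation
 * that must not fail) comes from __vmalloc().
 */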

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, unsigned char data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(c->slab_cache, data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data,
			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kmem_cache_free(c->slab_buffer, b);
		return NULL;
	}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	b->stack_len = 0;
#endif
	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	free_buffer_data(c, b->data, b->data_mode);
	kmem_cache_free(c->slab_buffer, b);
}

/*
 * Link buffer to the buffer tree and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	__insert(b->c, b);
	b->last_accessed = jiffies;

	adjust_total_allocated(b, false);
}

/*
 * Unlink buffer from the buffer tree and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	__remove(b->c, b);
	list_del(&b->lru_list);

	adjust_total_allocated(b, true);
}
/*
 * Place the buffer at the head of the dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	b->accessed = 1;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * The bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 * If the buffer is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine. It just calls b->end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
		     unsigned n_sectors, unsigned offset)
{
	int r;
	struct dm_io_request io_req = {
		.bi_op = rw,
		.bi_op_flags = 0,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = sector,
		.count = n_sectors,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = (char *)b->data + offset;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = (char *)b->data + offset;
	}

	r = dm_io(&io_req, 1, &region, NULL);
	if (unlikely(r))
		b->end_io(b, errno_to_blk_status(r));
}

static void bio_complete(struct bio *bio)
{
	struct dm_buffer *b = bio->bi_private;
	blk_status_t status = bio->bi_status;

	bio_put(bio);
	b->end_io(b, status);
}

static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
		    unsigned n_sectors, unsigned offset)
{
	struct bio *bio;
	char *ptr;
	unsigned vec_size, len;

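	/*
	 * Note (added commentary): the vector needs one entry per page of
	 * data; the two extra entries give headroom for data that does not
	 * start or end on a page boundary, which is possible when the
	 * block size is smaller than a page.
	 */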
	vec_size = b->c->block_size >> PAGE_SHIFT;
	if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
		vec_size += 2;

	bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size);
	if (!bio) {
dmio:
		use_dmio(b, rw, sector, n_sectors, offset);
		return;
	}

	bio->bi_iter.bi_sector = sector;
	bio_set_dev(bio, b->c->bdev);
	bio_set_op_attrs(bio, rw, 0);
	bio->bi_end_io = bio_complete;
	bio->bi_private = b;

	ptr = (char *)b->data + offset;
	len = n_sectors << SECTOR_SHIFT;

	do {
		unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);

		if (!bio_add_page(bio, virt_to_page(ptr), this_step,
				  offset_in_page(ptr))) {
			bio_put(bio);
			goto dmio;
		}

		len -= this_step;
		ptr += this_step;
	} while (len > 0);

	submit_bio(bio);
}

static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
{
	sector_t sector;

	if (likely(c->sectors_per_block_bits >= 0))
		sector = block << c->sectors_per_block_bits;
	else
		sector = block * (c->block_size >> SECTOR_SHIFT);
	sector += c->start;

	return sector;
}
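/*
 * Worked example (illustrative numbers): with a 4 KiB block size,
 * sectors_per_block_bits is 3, so block 10 maps to sector (10 << 3) +
 * c->start = 80 + c->start. The multiply path handles block sizes that
 * are not a power of two, where no shift count exists.
 */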

static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
{
	unsigned n_sectors;
	sector_t sector;
	unsigned offset, end;

	b->end_io = end_io;

	sector = block_to_sector(b->c, b->block);

	if (rw != REQ_OP_WRITE) {
		n_sectors = b->c->block_size >> SECTOR_SHIFT;
		offset = 0;
	} else {
		if (b->c->write_callback)
			b->c->write_callback(b);
		offset = b->write_start;
		end = b->write_end;
		offset &= -DM_BUFIO_WRITE_ALIGN;
		end += DM_BUFIO_WRITE_ALIGN - 1;
		end &= -DM_BUFIO_WRITE_ALIGN;
		if (unlikely(end > b->c->block_size))
			end = b->c->block_size;

		sector += offset >> SECTOR_SHIFT;
		n_sectors = (end - offset) >> SECTOR_SHIFT;
	}

	if (b->data_mode != DATA_MODE_VMALLOC)
		use_bio(b, rw, sector, n_sectors, offset);
	else
		use_dmio(b, rw, sector, n_sectors, offset);
}

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct dm_buffer *b, blk_status_t status)
{
	b->write_error = status;
	if (unlikely(status)) {
		struct dm_bufio_client *c = b->c;

		(void)cmpxchg(&c->async_write_error, 0,
			      blk_status_to_errno(status));
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}
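/*
 * Note (added commentary): the barriers around clear_bit() above pair
 * with the barriers in wait_on_bit_io(), ensuring a waiter that sees
 * B_WRITING cleared also sees b->write_error and the async_write_error
 * update made before the bit was cleared.
 */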

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we
 *   can't have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) static void __write_dirty_buffer(struct dm_buffer *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) struct list_head *write_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (!test_bit(B_DIRTY, &b->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) clear_bit(B_DIRTY, &b->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) b->write_start = b->dirty_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) b->write_end = b->dirty_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) if (!write_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) submit_io(b, REQ_OP_WRITE, write_endio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) list_add_tail(&b->write_list, write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) static void __flush_write_list(struct list_head *write_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) struct blk_plug plug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) blk_start_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) while (!list_empty(write_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) struct dm_buffer *b =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) list_entry(write_list->next, struct dm_buffer, write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) list_del(&b->write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) submit_io(b, REQ_OP_WRITE, write_endio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) blk_finish_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * Wait until any activity on the buffer finishes. Possibly write the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * buffer if it is dirty. When this function finishes, there is no I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * running on the buffer and the buffer is not dirty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) static void __make_buffer_clean(struct dm_buffer *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) BUG_ON(b->hold_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (!b->state) /* fast case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) __write_dirty_buffer(b, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * Find some buffer that is not held by anybody, clean it, unlink it and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * return it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) struct dm_buffer *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) BUG_ON(test_bit(B_WRITING, &b->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) BUG_ON(test_bit(B_DIRTY, &b->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (!b->hold_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) __make_buffer_clean(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) __unlink_buffer(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) return b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) BUG_ON(test_bit(B_READING, &b->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (!b->hold_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) __make_buffer_clean(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) __unlink_buffer(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) return b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * Wait until some other thread frees a buffer or releases its hold count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * on one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * This function is entered with c->lock held, drops it and regains it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * before exiting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) static void __wait_for_free_buffer(struct dm_bufio_client *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) DECLARE_WAITQUEUE(wait, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) add_wait_queue(&c->free_buffer_wait, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) set_current_state(TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) dm_bufio_unlock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
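/*
 * Setting TASK_UNINTERRUPTIBLE before dropping the lock means that a
 * wake_up() issued between the unlock and io_schedule() is not lost -
 * it simply makes io_schedule() return immediately.
 */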
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) io_schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) remove_wait_queue(&c->free_buffer_wait, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) dm_bufio_lock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
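/*
 * How a buffer is being acquired; this controls what __bufio_new does
 * when the block is not in the cache and when it is found mid-read:
 * NF_FRESH - the caller will overwrite the whole buffer, don't read it
 * NF_READ - read the buffer from disk if it is not cached
 * NF_GET - return the buffer only if it is already cached
 * NF_PREFETCH - start an asynchronous read, do not wait for it
 */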
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) enum new_flag {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) NF_FRESH = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) NF_READ = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) NF_GET = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) NF_PREFETCH = 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * Allocate a new buffer. If the allocation is not possible, wait until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * some other thread frees a buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * May drop the lock and regain it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct dm_buffer *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) bool tried_noio_alloc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * dm-bufio is resistant to allocation failures (it just keeps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * one buffer reserved in case all allocations fail).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * So set flags to not try too hard:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * mutex and wait ourselves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * __GFP_NORETRY: don't retry; return failure instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * __GFP_NOMEMALLOC: don't use emergency reserves
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * __GFP_NOWARN: don't print a warning in case of failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * For debugging, if we set the cache size to 1, no new buffers will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * be allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (dm_bufio_cache_size_latch != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (nf == NF_PREFETCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) dm_bufio_unlock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) dm_bufio_lock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) tried_noio_alloc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
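/*
 * Allocation failed - fall back to the preallocated reserve, then to
 * evicting an unheld buffer, and finally sleep until another thread
 * releases one.
 */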
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (!list_empty(&c->reserved_buffers)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) b = list_entry(c->reserved_buffers.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct dm_buffer, lru_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) list_del(&b->lru_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) c->need_reserved_buffers++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) b = __get_unclaimed_buffer(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) return b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) __wait_for_free_buffer(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (!b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (c->alloc_callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) c->alloc_callback(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * Free a buffer and wake other threads waiting for free buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) static void __free_buffer_wake(struct dm_buffer *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct dm_bufio_client *c = b->c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (!c->need_reserved_buffers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) free_buffer(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) list_add(&b->lru_list, &c->reserved_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) c->need_reserved_buffers--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) wake_up(&c->free_buffer_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct list_head *write_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct dm_buffer *b, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) BUG_ON(test_bit(B_READING, &b->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (!test_bit(B_DIRTY, &b->state) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) !test_bit(B_WRITING, &b->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) __relink_lru(b, LIST_CLEAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (no_wait && test_bit(B_WRITING, &b->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) __write_dirty_buffer(b, write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * Check if we're over the writeback watermark: when the number of dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * buffers exceeds DM_BUFIO_WRITEBACK_RATIO times the number of clean
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * buffers, start writing them back asynchronously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) static void __check_watermark(struct dm_bufio_client *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct list_head *write_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) __write_dirty_buffers_async(c, 1, write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) /*----------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * Getting a buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) *--------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) enum new_flag nf, int *need_submit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct list_head *write_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) struct dm_buffer *b, *new_b = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) *need_submit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) b = __find(c, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) goto found_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (nf == NF_GET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) new_b = __alloc_buffer_wait(c, nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (!new_b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * We've had a period where the mutex was unlocked, so we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * recheck the buffer tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) b = __find(c, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (b) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) __free_buffer_wake(new_b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) goto found_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) __check_watermark(c, write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) b = new_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) b->hold_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) b->read_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) b->write_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) __link_buffer(b, block, LIST_CLEAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (nf == NF_FRESH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) b->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) return b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) b->state = 1 << B_READING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) *need_submit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) found_buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (nf == NF_PREFETCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * Note: it is essential that we don't wait for the buffer to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * read if the dm_bufio_get function is used. Both dm_bufio_get and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * dm_bufio_prefetch can be used in the driver request routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * If the user called both dm_bufio_prefetch and dm_bufio_get on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * the same buffer, it would deadlock if we waited.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) b->hold_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) test_bit(B_WRITING, &b->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * The endio routine for reading: set the error, clear the bit and wake up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * anyone waiting on the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) static void read_endio(struct dm_buffer *b, blk_status_t status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) b->read_error = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) BUG_ON(!test_bit(B_READING, &b->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
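/*
 * The barriers pair with wait_on_bit_io() in new_read(): the error must
 * be visible before B_READING is cleared, and the cleared bit must be
 * visible before a sleeping waiter is woken.
 */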
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) smp_mb__before_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) clear_bit(B_READING, &b->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) wake_up_bit(&b->state, B_READING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * A common routine for dm_bufio_new and dm_bufio_read. The operation of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * these functions is similar, except that dm_bufio_new doesn't read the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * buffer from disk (it assumes that the caller overwrites all the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * and uses dm_bufio_mark_buffer_dirty to write new data back).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static void *new_read(struct dm_bufio_client *c, sector_t block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) enum new_flag nf, struct dm_buffer **bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) int need_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) struct dm_buffer *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) LIST_HEAD(write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) dm_bufio_lock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) b = __bufio_new(c, block, nf, &need_submit, &write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (b && b->hold_count == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) buffer_record_stack(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) dm_bufio_unlock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) __flush_write_list(&write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (!b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (need_submit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) submit_io(b, REQ_OP_READ, read_endio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (b->read_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) int error = blk_status_to_errno(b->read_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) dm_bufio_release(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) *bp = b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return b->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) struct dm_buffer **bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) return new_read(c, block, NF_GET, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) EXPORT_SYMBOL_GPL(dm_bufio_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) struct dm_buffer **bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) BUG_ON(dm_bufio_in_request());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) return new_read(c, block, NF_READ, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) EXPORT_SYMBOL_GPL(dm_bufio_read);
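/*
 * Illustrative sketch (not part of this file): the typical
 * read-modify-write cycle a caller performs with this interface,
 * assuming "c" is a client created elsewhere with
 * dm_bufio_client_create():
 *
 *	struct dm_buffer *buf;
 *	void *data = dm_bufio_read(c, block, &buf);
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memset(data, 0, dm_bufio_get_block_size(c));
 *	dm_bufio_mark_buffer_dirty(buf);
 *	dm_bufio_release(buf);
 *	return dm_bufio_write_dirty_buffers(c);
 */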
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct dm_buffer **bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) BUG_ON(dm_bufio_in_request());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return new_read(c, block, NF_FRESH, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) EXPORT_SYMBOL_GPL(dm_bufio_new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) void dm_bufio_prefetch(struct dm_bufio_client *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) sector_t block, unsigned n_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) struct blk_plug plug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) LIST_HEAD(write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) BUG_ON(dm_bufio_in_request());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) blk_start_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) dm_bufio_lock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) for (; n_blocks--; block++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) int need_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct dm_buffer *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) &write_list);
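/*
 * __bufio_new() may have queued dirty buffers for writeback via
 * __check_watermark(); submit them with the lock and plug dropped.
 */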
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (unlikely(!list_empty(&write_list))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) dm_bufio_unlock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) blk_finish_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) __flush_write_list(&write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) blk_start_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) dm_bufio_lock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (unlikely(b != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) dm_bufio_unlock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (need_submit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) submit_io(b, REQ_OP_READ, read_endio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) dm_bufio_release(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (!n_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) goto flush_plug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) dm_bufio_lock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) dm_bufio_unlock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) flush_plug:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) blk_finish_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
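/*
 * Illustrative sketch (not part of this file): prefetch pairs with
 * dm_bufio_get(), which never waits for a read in progress:
 *
 *	dm_bufio_prefetch(c, block, n);
 *	...
 *	data = dm_bufio_get(c, block, &buf);
 *	if (!data)
 *		... block not cached yet, fall back to dm_bufio_read() ...
 */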
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) void dm_bufio_release(struct dm_buffer *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) struct dm_bufio_client *c = b->c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) dm_bufio_lock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) BUG_ON(!b->hold_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) b->hold_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (!b->hold_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) wake_up(&c->free_buffer_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * If there were errors on the buffer, and the buffer is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * to be written, free the buffer. There is no point in caching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * an invalid buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if ((b->read_error || b->write_error) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) !test_bit(B_READING, &b->state) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) !test_bit(B_WRITING, &b->state) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) !test_bit(B_DIRTY, &b->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) __unlink_buffer(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) __free_buffer_wake(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) dm_bufio_unlock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) EXPORT_SYMBOL_GPL(dm_bufio_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
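/*
 * Mark the byte range [start, end) of the buffer dirty. Successive calls
 * are merged into a single interval: marking bytes 0..511 and then
 * 3584..4095 dirty leaves dirty_start == 0 and dirty_end == 4096, so the
 * whole span is written back in one I/O.
 */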
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) unsigned start, unsigned end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) struct dm_bufio_client *c = b->c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) BUG_ON(start >= end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) BUG_ON(end > b->c->block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) dm_bufio_lock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) BUG_ON(test_bit(B_READING, &b->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (!test_and_set_bit(B_DIRTY, &b->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) b->dirty_start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) b->dirty_end = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) __relink_lru(b, LIST_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (start < b->dirty_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) b->dirty_start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (end > b->dirty_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) b->dirty_end = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) dm_bufio_unlock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) LIST_HEAD(write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) BUG_ON(dm_bufio_in_request());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) dm_bufio_lock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) __write_dirty_buffers_async(c, 0, &write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) dm_bufio_unlock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) __flush_write_list(&write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) * For performance, it is essential that the buffers are written asynchronously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) * and simultaneously (so that the block layer can merge the writes) and then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * waited upon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) * Finally, we flush the hardware disk cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) int a, f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) unsigned long buffers_processed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) struct dm_buffer *b, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) LIST_HEAD(write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) dm_bufio_lock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) __write_dirty_buffers_async(c, 0, &write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) dm_bufio_unlock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) __flush_write_list(&write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) dm_bufio_lock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) int dropped_lock = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (buffers_processed < c->n_buffers[LIST_DIRTY])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) buffers_processed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) BUG_ON(test_bit(B_READING, &b->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (test_bit(B_WRITING, &b->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) dropped_lock = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) b->hold_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) dm_bufio_unlock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) wait_on_bit_io(&b->state, B_WRITING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) dm_bufio_lock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) b->hold_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) wait_on_bit_io(&b->state, B_WRITING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (!test_bit(B_DIRTY, &b->state) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) !test_bit(B_WRITING, &b->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) __relink_lru(b, LIST_CLEAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * If we dropped the lock, the list is no longer consistent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * so we must restart the search.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * In the most common case, the buffer just processed is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * relinked to the clean list, so we won't loop scanning the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * same buffer again and again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * This may livelock if there is another thread simultaneously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * dirtying buffers, so we count the number of buffers walked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) * and if it exceeds the total number of buffers, it means that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * someone is doing some writes simultaneously with us. In
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * this case, stop dropping the lock and wait with it held instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) if (dropped_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) wake_up(&c->free_buffer_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) dm_bufio_unlock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) a = xchg(&c->async_write_error, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) f = dm_bufio_issue_flush(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) return a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * Use dm-io to send an empty barrier to flush the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) int dm_bufio_issue_flush(struct dm_bufio_client *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) struct dm_io_request io_req = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) .bi_op = REQ_OP_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) .mem.type = DM_IO_KMEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) .mem.ptr.addr = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) .client = c->dm_io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct dm_io_region io_reg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) .bdev = c->bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) .sector = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) .count = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) };
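/* io_reg describes a zero-sector region: no data is transferred, only the preflush is issued. */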
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) BUG_ON(dm_bufio_in_request());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) return dm_io(&io_req, 1, &io_reg, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * Use dm-io to send a discard request to the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) struct dm_io_request io_req = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) .bi_op = REQ_OP_DISCARD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) .bi_op_flags = REQ_SYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) .mem.type = DM_IO_KMEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) .mem.ptr.addr = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) .client = c->dm_io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) struct dm_io_region io_reg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) .bdev = c->bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) .sector = block_to_sector(c, block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) .count = block_to_sector(c, count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) BUG_ON(dm_bufio_in_request());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return dm_io(&io_req, 1, &io_reg, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * We first delete any other buffer that may be at that new location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * Then, we write the buffer to the original location if it was dirty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) * Then, if we are the only one holding the buffer, relink it in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * buffer tree for the new location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * If there was someone else holding the buffer, we write it to the new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * location but not relink it, because that other user needs to have the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) * at the same place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct dm_bufio_client *c = b->c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) struct dm_buffer *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) BUG_ON(dm_bufio_in_request());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) dm_bufio_lock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) new = __find(c, new_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (new->hold_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) __wait_for_free_buffer(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) * FIXME: Is there any point waiting for a write that's going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) * to be overwritten in a bit?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) __make_buffer_clean(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) __unlink_buffer(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) __free_buffer_wake(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) BUG_ON(!b->hold_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) BUG_ON(test_bit(B_READING, &b->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) __write_dirty_buffer(b, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (b->hold_count == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) wait_on_bit_io(&b->state, B_WRITING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) set_bit(B_DIRTY, &b->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) b->dirty_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) b->dirty_end = c->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) __unlink_buffer(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) __link_buffer(b, new_block, LIST_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) sector_t old_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) wait_on_bit_lock_io(&b->state, B_WRITING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * Relink the buffer to "new_block" so that write_callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * sees "new_block" as its block number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * After the write, link the buffer back to old_block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) * All this must be done under the bufio lock, so that the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * number change isn't visible to other threads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) old_block = b->block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) __unlink_buffer(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) __link_buffer(b, new_block, b->list_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) submit_io(b, REQ_OP_WRITE, write_endio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) wait_on_bit_io(&b->state, B_WRITING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) __unlink_buffer(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) __link_buffer(b, old_block, b->list_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) dm_bufio_unlock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) dm_bufio_release(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) EXPORT_SYMBOL_GPL(dm_bufio_release_move);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
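/*
 * Drop the buffer from the cache if nobody holds it and no I/O is in
 * flight on it; otherwise do nothing - forgetting is only a hint.
 */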
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) static void forget_buffer_locked(struct dm_buffer *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (likely(!b->hold_count) && likely(!b->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) __unlink_buffer(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) __free_buffer_wake(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) * Free the given buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) * This is just a hint; if the buffer is in use or dirty, this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) * does nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) struct dm_buffer *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) dm_bufio_lock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) b = __find(c, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) forget_buffer_locked(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) dm_bufio_unlock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) EXPORT_SYMBOL_GPL(dm_bufio_forget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) struct dm_buffer *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) sector_t end_block = block + n_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) while (block < end_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) dm_bufio_lock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) b = __find_next(c, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (b) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) block = b->block + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) forget_buffer_locked(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) dm_bufio_unlock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (!b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) c->minimum_buffers = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return c->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
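/*
 * Device size in blocks: the raw size in sectors minus the per-client
 * data offset (c->start), divided by the block size (a shift when the
 * block size is a power of two, sector_div() otherwise).
 */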
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (s >= c->start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) s -= c->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) s = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (likely(c->sectors_per_block_bits >= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) s >>= c->sectors_per_block_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) sector_div(s, c->block_size >> SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) return s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) return c->dm_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) sector_t dm_bufio_get_block_number(struct dm_buffer *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) return b->block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) void *dm_bufio_get_block_data(struct dm_buffer *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) return b->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) void *dm_bufio_get_aux_data(struct dm_buffer *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) return b + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) return b->c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) EXPORT_SYMBOL_GPL(dm_bufio_get_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) static void drop_buffers(struct dm_bufio_client *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) struct dm_buffer *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) bool warned = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) BUG_ON(dm_bufio_in_request());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) * An optimization so that the buffers are not written one-by-one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) dm_bufio_write_dirty_buffers_async(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) dm_bufio_lock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) while ((b = __get_unclaimed_buffer(c)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) __free_buffer_wake(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) for (i = 0; i < LIST_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) list_for_each_entry(b, &c->lru[i], lru_list) {
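/* warn (once) on the first leaked buffer, then just log the rest */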
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) WARN_ON(!warned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) warned = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) DMERR("leaked buffer %llx, hold count %u, list %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) (unsigned long long)b->block, b->hold_count, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) stack_trace_print(b->stack_entries, b->stack_len, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) /* mark unclaimed to avoid BUG_ON below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) b->hold_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) while ((b = __get_unclaimed_buffer(c)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) __free_buffer_wake(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) for (i = 0; i < LIST_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) BUG_ON(!list_empty(&c->lru[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) dm_bufio_unlock(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) * We may not be able to evict this buffer if I/O is pending or the client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) * is still using it. The caller is expected to know the buffer is too old.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) * And if GFP_NOFS is used, we must not do any I/O because we hold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * rerouted to a different bufio client.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (!(gfp & __GFP_FS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (test_bit(B_READING, &b->state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) test_bit(B_WRITING, &b->state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) test_bit(B_DIRTY, &b->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (b->hold_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) __make_buffer_clean(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) __unlink_buffer(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) __free_buffer_wake(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
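/*
 * Convert the dm_bufio_retain_bytes tunable into a number of buffers
 * for this client's block size.
 */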
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) static unsigned long get_retain_buffers(struct dm_bufio_client *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (likely(c->sectors_per_block_bits >= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) retain_bytes /= c->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) return retain_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
static void __scan(struct dm_bufio_client *c)
{
	int l;
	struct dm_buffer *b, *tmp;
	unsigned long freed = 0;
	unsigned long count = c->n_buffers[LIST_CLEAN] +
			      c->n_buffers[LIST_DIRTY];
	unsigned long retain_target = get_retain_buffers(c);

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
			if (count - freed <= retain_target)
				atomic_long_set(&c->need_shrink, 0);
			if (!atomic_long_read(&c->need_shrink))
				return;
			if (__try_evict_buffer(b, GFP_KERNEL)) {
				atomic_long_dec(&c->need_shrink);
				freed++;
			}
			cond_resched();
		}
	}
}

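/*
 * Worker that performs the eviction on behalf of the shrinker, so the
 * shrinker callback itself does not block on the client lock.
 */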
static void shrink_work(struct work_struct *w)
{
	struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);

	dm_bufio_lock(c);
	__scan(c);
	dm_bufio_unlock(c);
}

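/*
 * Shrinker callback: record how many buffers the VM asked us to reclaim
 * and defer the actual eviction to shrink_work.
 */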
static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
	queue_work(dm_bufio_wq, &c->shrink_work);

	return sc->nr_to_scan;
}

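/*
 * Shrinker callback: report how many buffers are reclaimable, i.e. the
 * buffers above the retain target that are not already queued for cleanup.
 */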
static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
	unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
			      READ_ONCE(c->n_buffers[LIST_DIRTY]);
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);

	if (unlikely(count < retain_target))
		count = 0;
	else
		count -= retain_target;

	if (unlikely(count < queued_for_cleanup))
		count = 0;
	else
		count -= queued_for_cleanup;

	return count;
}

/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
					       unsigned int reserved_buffers, unsigned int aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
{
	int r;
	struct dm_bufio_client *c;
	unsigned int i;
	char slab_name[27];

	if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
		DMERR("%s: block size not specified or is not a multiple of 512 bytes", __func__);
		r = -EINVAL;
		goto bad_client;
	}

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->buffer_tree = RB_ROOT;

	c->bdev = bdev;
	c->block_size = block_size;
	if (is_power_of_2(block_size))
		c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
	else
		c->sectors_per_block_bits = -1;

	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	if (block_size <= KMALLOC_MAX_SIZE &&
	    (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
		unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);

		snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u", block_size);
		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
						  SLAB_RECLAIM_ACCOUNT, NULL);
		if (!c->slab_cache) {
			r = -ENOMEM;
			goto bad;
		}
	}
	if (aux_size)
		snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u", aux_size);
	else
		snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer");
	c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
					   0, SLAB_RECLAIM_ACCOUNT, NULL);
	if (!c->slab_buffer) {
		r = -ENOMEM;
		goto bad;
	}

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad;
		}
		__free_buffer_wake(b);
	}

	INIT_WORK(&c->shrink_work, shrink_work);
	atomic_long_set(&c->need_shrink, 0);

	c->shrinker.count_objects = dm_bufio_shrink_count;
	c->shrinker.scan_objects = dm_bufio_shrink_scan;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	r = register_shrinker(&c->shrinker);
	if (r)
		goto bad;

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	return c;

bad:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);

		list_del(&b->lru_list);
		free_buffer(b);
	}
	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	mutex_destroy(&c->lock);
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
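
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * hypothetical caller creates a client for 4096-byte blocks with eight
 * reserved buffers and no auxiliary data or callbacks, reads block 0 and
 * releases it.  The names example_bdev and example_read_block are
 * assumptions made up for the example.
 *
 *	static int example_read_block(struct block_device *example_bdev)
 *	{
 *		struct dm_bufio_client *c;
 *		struct dm_buffer *b;
 *		void *data;
 *
 *		c = dm_bufio_client_create(example_bdev, 4096, 8, 0, NULL, NULL);
 *		if (IS_ERR(c))
 *			return PTR_ERR(c);
 *
 *		data = dm_bufio_read(c, 0, &b);	// read block 0, take a hold
 *		if (IS_ERR(data)) {
 *			dm_bufio_client_destroy(c);
 *			return PTR_ERR(data);
 *		}
 *		// ... use the 4096 bytes at data ...
 *		dm_bufio_release(b);		// drop the hold
 *
 *		dm_bufio_client_destroy(c);
 *		return 0;
 *	}
 */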

/*
 * Free the buffering interface.
 * It is required that there are no references to any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned int i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);
	flush_work(&c->shrink_work);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);

		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
	mutex_destroy(&c->lock);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

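/*
 * Set a start offset that is added, in sectors, to every I/O issued on
 * behalf of this client, so that block 0 maps to the given sector of the
 * underlying device.
 */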
void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
{
	c->start = start;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);

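/*
 * Convert the max_age_seconds module parameter to jiffies, clamped so the
 * multiplication by HZ cannot overflow.
 */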
static unsigned int get_max_age_hz(void)
{
	unsigned int max_age = READ_ONCE(dm_bufio_max_age);

	if (max_age > UINT_MAX / HZ)
		max_age = UINT_MAX / HZ;

	return max_age * HZ;
}

static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
	return time_after_eq(jiffies, b->last_accessed + age_hz);
}

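/*
 * Queue dirty buffers for write-back if the watermark is exceeded, then
 * walk the clean LRU list from its cold end and evict buffers that have
 * not been accessed for age_hz jiffies, stopping at the retain target.
 */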
static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
	struct dm_buffer *b, *tmp;
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long count;
	LIST_HEAD(write_list);

	dm_bufio_lock(c);

	__check_watermark(c, &write_list);
	if (unlikely(!list_empty(&write_list))) {
		dm_bufio_unlock(c);
		__flush_write_list(&write_list);
		dm_bufio_lock(c);
	}

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
		if (count <= retain_target)
			break;

		if (!older_than(b, age_hz))
			break;

		if (__try_evict_buffer(b, 0))
			count--;

		cond_resched();
	}

	dm_bufio_unlock(c);
}

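/*
 * Global cleanup: while the total amount of allocated memory is above the
 * low watermark, walk the global LRU from its cold end and evict buffers,
 * giving recently accessed buffers a second chance.  Per-client locks are
 * taken with trylock first so that the global spinlock is never held
 * while sleeping.
 */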
static void do_global_cleanup(struct work_struct *w)
{
	struct dm_bufio_client *locked_client = NULL;
	struct dm_bufio_client *current_client;
	struct dm_buffer *b;
	unsigned int spinlock_hold_count;
	unsigned long threshold = dm_bufio_cache_size -
		dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
	unsigned long loops = global_num * 2;

	mutex_lock(&dm_bufio_clients_lock);

	while (1) {
		cond_resched();

		spin_lock(&global_spinlock);
		if (unlikely(dm_bufio_current_allocated <= threshold))
			break;

		spinlock_hold_count = 0;
get_next:
		if (!loops--)
			break;
		if (unlikely(list_empty(&global_queue)))
			break;
		b = list_entry(global_queue.prev, struct dm_buffer, global_list);

		if (b->accessed) {
			b->accessed = 0;
			list_move(&b->global_list, &global_queue);
			if (likely(++spinlock_hold_count < 16))
				goto get_next;
			spin_unlock(&global_spinlock);
			continue;
		}

		current_client = b->c;
		if (unlikely(current_client != locked_client)) {
			if (locked_client)
				dm_bufio_unlock(locked_client);

			if (!dm_bufio_trylock(current_client)) {
				spin_unlock(&global_spinlock);
				dm_bufio_lock(current_client);
				locked_client = current_client;
				continue;
			}

			locked_client = current_client;
		}

		spin_unlock(&global_spinlock);

		if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
			spin_lock(&global_spinlock);
			list_move(&b->global_list, &global_queue);
			spin_unlock(&global_spinlock);
		}
	}

	spin_unlock(&global_spinlock);

	if (locked_client)
		dm_bufio_unlock(locked_client);

	mutex_unlock(&dm_bufio_clients_lock);
}

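/*
 * Periodic cleanup: refresh the cache size limits and evict buffers that
 * are older than max_age_seconds from every registered client.
 */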
static void cleanup_old_buffers(void)
{
	unsigned long max_age_hz = get_max_age_hz();
	struct dm_bufio_client *c;

	mutex_lock(&dm_bufio_clients_lock);

	__cache_size_refresh();

	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
		__evict_old_buffers(c, max_age_hz);

	mutex_unlock(&dm_bufio_clients_lock);
}

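/*
 * Delayed work that runs the cleanup above and re-arms itself every
 * DM_BUFIO_WORK_TIMER_SECS seconds.
 */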
static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes the memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
	INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}
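
/*
 * Worked example (illustrative, assuming a 64-bit machine with 4 GiB of
 * non-highmem RAM): 2% of 4 GiB is about 82 MiB, far below 25% of the
 * vmalloc space on 64-bit, so dm_bufio_default_cache_size would be set
 * to about 82 MiB.
 */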

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;

	cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
	flush_workqueue(dm_bufio_wq);
	destroy_workqueue(dm_bufio_wq);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	BUG_ON(bug);
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

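/*
 * The parameters below appear under /sys/module/dm_bufio/parameters/;
 * those with mode 0644 can be changed at runtime.
 */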
module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");