/*
 * Copyright (C) 2005-2007 Red Hat GmbH
 *
 * A target that delays reads and/or writes and can send
 * them to different devices.
 *
 * This file is released under the GPL.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "delay"

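/*
 * Per-class settings; one instance each for reads, writes and flushes.
 * @dev:   underlying device this class maps to
 * @start: offset into @dev, in sectors
 * @delay: delay applied to this class, in milliseconds
 * @ops:   number of bios of this class currently held on the delayed list
 */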
struct delay_class {
	struct dm_dev *dev;
	sector_t start;
	unsigned int delay;
	unsigned int ops;
};

struct delay_c {
	struct timer_list delay_timer;
	struct mutex timer_lock;
	struct workqueue_struct *kdelayd_wq;
	struct work_struct flush_expired_bios;
	struct list_head delayed_bios;
	atomic_t may_delay;

	struct delay_class read;
	struct delay_class write;
	struct delay_class flush;

	int argc;
};

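/* Per-bio bookkeeping, stored in the bio's per-target data area. */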
struct dm_delay_info {
	struct delay_c *context;
	struct delay_class *class;
	struct list_head list;
	unsigned long expires;
};

static DEFINE_MUTEX(delayed_bios_lock);

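/*
 * Timer callback: runs in interrupt context, so defer the actual bio
 * submission to the workqueue.
 */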
static void handle_delayed_timer(struct timer_list *t)
{
	struct delay_c *dc = from_timer(dc, t, delay_timer);

	queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
}

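/* Arm the timer, but never push an already-pending expiry further out. */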
static void queue_timeout(struct delay_c *dc, unsigned long expires)
{
	mutex_lock(&dc->timer_lock);

	if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
		mod_timer(&dc->delay_timer, expires);

	mutex_unlock(&dc->timer_lock);
}

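/* Submit a chain of bios linked through bi_next. */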
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		submit_bio_noacct(bio);
		bio = n;
	}
}

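/*
 * Move every expired bio (or, with flush_all, every bio) off the delayed
 * list and return them as a chain for submission, re-arming the timer for
 * the earliest remaining expiry.
 */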
static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
{
	struct dm_delay_info *delayed, *next;
	unsigned long next_expires = 0;
	unsigned long start_timer = 0;
	struct bio_list flush_bios = { };

	mutex_lock(&delayed_bios_lock);
	list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
		if (flush_all || time_after_eq(jiffies, delayed->expires)) {
			struct bio *bio = dm_bio_from_per_bio_data(delayed,
						sizeof(struct dm_delay_info));
			list_del(&delayed->list);
			bio_list_add(&flush_bios, bio);
			delayed->class->ops--;
			continue;
		}

		if (!start_timer) {
			start_timer = 1;
			next_expires = delayed->expires;
		} else
			next_expires = min(next_expires, delayed->expires);
	}
	mutex_unlock(&delayed_bios_lock);

	if (start_timer)
		queue_timeout(dc, next_expires);

	return bio_list_get(&flush_bios);
}

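/* Workqueue handler: submit every bio whose delay has expired. */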
static void flush_expired_bios(struct work_struct *work)
{
	struct delay_c *dc;

	dc = container_of(work, struct delay_c, flush_expired_bios);
	flush_bios(flush_delayed_bios(dc, 0));
}

static void delay_dtr(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	if (dc->kdelayd_wq)
		destroy_workqueue(dc->kdelayd_wq);

	if (dc->read.dev)
		dm_put_device(ti, dc->read.dev);
	if (dc->write.dev)
		dm_put_device(ti, dc->write.dev);
	if (dc->flush.dev)
		dm_put_device(ti, dc->flush.dev);

	mutex_destroy(&dc->timer_lock);

	kfree(dc);
}

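/* Parse one "<device> <offset> <delay>" argument triple into @c. */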
static int delay_class_ctr(struct dm_target *ti, struct delay_class *c, char **argv)
{
	int ret;
	unsigned long long tmpll;
	char dummy;

	if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
		ti->error = "Invalid device sector";
		return -EINVAL;
	}
	c->start = tmpll;

	if (sscanf(argv[2], "%u%c", &c->delay, &dummy) != 1) {
		ti->error = "Invalid delay";
		return -EINVAL;
	}

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &c->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		return ret;
	}

	return 0;
}

/*
 * Mapping parameters:
 *    <device> <offset> <delay> [<write_device> <write_offset> <write_delay>
 *			       [<flush_device> <flush_offset> <flush_delay>]]
 *
 * With separate write parameters, the first set is only used for reads.
 * With separate flush parameters, the third set is used for flushes;
 * otherwise flushes share the write parameters.
 * Offsets are specified in sectors.
 * Delays are specified in milliseconds.
 */
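/*
 * Usage sketch (device names and delay values are illustrative):
 *
 *   echo "0 $(blockdev --getsz /dev/sdX) delay /dev/sdX 0 500" | \
 *     dmsetup create delayed
 *
 * delays every read and write to /dev/sdX by 500 ms. With six parameters,
 *
 *   echo "0 $(blockdev --getsz /dev/sdX) delay /dev/sdX 0 0 /dev/sdY 0 400" | \
 *     dmsetup create delayed
 *
 * leaves reads to /dev/sdX undelayed while sending writes (and flushes)
 * to /dev/sdY delayed by 400 ms.
 */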
static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct delay_c *dc;
	int ret;

	if (argc != 3 && argc != 6 && argc != 9) {
		ti->error = "Requires exactly 3, 6 or 9 arguments";
		return -EINVAL;
	}

	dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	if (!dc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	ti->private = dc;
	timer_setup(&dc->delay_timer, handle_delayed_timer, 0);
	INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
	INIT_LIST_HEAD(&dc->delayed_bios);
	mutex_init(&dc->timer_lock);
	atomic_set(&dc->may_delay, 1);
	dc->argc = argc;

	ret = delay_class_ctr(ti, &dc->read, argv);
	if (ret)
		goto bad;

	if (argc == 3) {
		ret = delay_class_ctr(ti, &dc->write, argv);
		if (ret)
			goto bad;
		ret = delay_class_ctr(ti, &dc->flush, argv);
		if (ret)
			goto bad;
		goto out;
	}

	ret = delay_class_ctr(ti, &dc->write, argv + 3);
	if (ret)
		goto bad;
	if (argc == 6) {
		ret = delay_class_ctr(ti, &dc->flush, argv + 3);
		if (ret)
			goto bad;
		goto out;
	}

	ret = delay_class_ctr(ti, &dc->flush, argv + 6);
	if (ret)
		goto bad;

out:
	dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
	if (!dc->kdelayd_wq) {
		ret = -EINVAL;
		DMERR("Couldn't start kdelayd");
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct dm_delay_info);
	return 0;

bad:
	delay_dtr(ti);
	return ret;
}

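/*
 * Queue a bio on the delayed list and (re)arm the timer. Returns
 * DM_MAPIO_REMAPPED when the class has no delay configured or delaying is
 * suspended, so the caller submits the bio immediately.
 */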
static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
{
	struct dm_delay_info *delayed;
	unsigned long expires = 0;

	if (!c->delay || !atomic_read(&dc->may_delay))
		return DM_MAPIO_REMAPPED;

	delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));

	delayed->context = dc;
	delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);

	mutex_lock(&delayed_bios_lock);
	c->ops++;
	list_add_tail(&delayed->list, &dc->delayed_bios);
	mutex_unlock(&delayed_bios_lock);

	queue_timeout(dc, expires);

	return DM_MAPIO_SUBMITTED;
}

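/* Stop delaying and flush all still-queued bios before a suspend. */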
static void delay_presuspend(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	atomic_set(&dc->may_delay, 0);
	del_timer_sync(&dc->delay_timer);
	flush_bios(flush_delayed_bios(dc, 1));
}

static void delay_resume(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	atomic_set(&dc->may_delay, 1);
}

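/*
 * Pick the delay class from the bio direction (preflushes get their own
 * class), remap the bio to that class's device and offset, then either
 * queue it or let device-mapper submit it straight away.
 */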
static int delay_map(struct dm_target *ti, struct bio *bio)
{
	struct delay_c *dc = ti->private;
	struct delay_class *c;
	struct dm_delay_info *delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));

	if (bio_data_dir(bio) == WRITE) {
		if (unlikely(bio->bi_opf & REQ_PREFLUSH))
			c = &dc->flush;
		else
			c = &dc->write;
	} else {
		c = &dc->read;
	}
	delayed->class = c;
	bio_set_dev(bio, c->dev->bdev);
	if (bio_sectors(bio))
		bio->bi_iter.bi_sector = c->start + dm_target_offset(ti, bio->bi_iter.bi_sector);

	return delay_bio(dc, c, bio);
}

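/* Emit one class as "<device> <offset> <delay>" for table status. */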
#define DMEMIT_DELAY_CLASS(c) \
	DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay)

static void delay_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct delay_c *dc = ti->private;
	int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%u %u %u", dc->read.ops, dc->write.ops, dc->flush.ops);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT_DELAY_CLASS(&dc->read);
		if (dc->argc >= 6) {
			DMEMIT(" ");
			DMEMIT_DELAY_CLASS(&dc->write);
		}
		if (dc->argc >= 9) {
			DMEMIT(" ");
			DMEMIT_DELAY_CLASS(&dc->flush);
		}
		break;
	}
}

static int delay_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct delay_c *dc = ti->private;
	int ret = 0;

	ret = fn(ti, dc->read.dev, dc->read.start, ti->len, data);
	if (ret)
		goto out;
	ret = fn(ti, dc->write.dev, dc->write.start, ti->len, data);
	if (ret)
		goto out;
	ret = fn(ti, dc->flush.dev, dc->flush.start, ti->len, data);

out:
	return ret;
}

static struct target_type delay_target = {
	.name = "delay",
	.version = {1, 2, 1},
	.features = DM_TARGET_PASSES_INTEGRITY,
	.module = THIS_MODULE,
	.ctr = delay_ctr,
	.dtr = delay_dtr,
	.map = delay_map,
	.presuspend = delay_presuspend,
	.resume = delay_resume,
	.status = delay_status,
	.iterate_devices = delay_iterate_devices,
};

static int __init dm_delay_init(void)
{
	int r;

	r = dm_register_target(&delay_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_delay_exit(void)
{
	dm_unregister_target(&delay_target);
}

/* Module hooks */
module_init(dm_delay_init);
module_exit(dm_delay_exit);

MODULE_DESCRIPTION(DM_NAME " delay target");
MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>");
MODULE_LICENSE("GPL");