/*
 * Internal header file for device mapper
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_RQ_INTERNAL_H
#define DM_RQ_INTERNAL_H

#include <linux/bio.h>
#include <linux/kthread.h>

#include "dm-stats.h"

struct mapped_device;

/*
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
	struct bio clone;
};
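
/*
 * Illustrative sketch, not part of the original header: given the layout
 * above, the bioset backing these clones would be created with front_pad
 * sized to everything that precedes 'clone', and the wrapper can then be
 * recovered from the embedded bio with container_of(). Roughly:
 *
 *	bioset_init(&bs, pool_size,
 *		    offsetof(struct dm_rq_clone_bio_info, clone), 0);
 *
 *	static inline struct dm_rq_clone_bio_info *to_clone_bio_info(struct bio *bio)
 *	{
 *		return container_of(bio, struct dm_rq_clone_bio_info, clone);
 *	}
 *
 * 'bs', 'pool_size' and to_clone_bio_info() are hypothetical names used
 * only for this sketch.
 */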

int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
void dm_mq_cleanup_mapped_device(struct mapped_device *md);

void dm_start_queue(struct request_queue *q);
void dm_stop_queue(struct request_queue *q);

void dm_mq_kick_requeue_list(struct mapped_device *md);

unsigned int dm_get_reserved_rq_based_ios(void);

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count);

#endif /* DM_RQ_INTERNAL_H */