// SPDX-License-Identifier: GPL-2.0
/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash: requests are hashed by the sector immediately following
 * them, so a back merge candidate for a given bio can be found quickly.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
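
/*
 * Example (illustrative, values assumed): a request starting at sector
 * 2048 that spans 8 sectors has rq_hash_key() == 2056, so a bio whose
 * first sector is 2056 hashes to the same bucket and is a back merge
 * candidate for it.
 */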

/*
 * Query the io scheduler to see if the bio being issued by the current
 * process may be merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.allow_merge)
		return e->type->ops.allow_merge(q, rq, bio);

	return 1;
}

/*
 * Can we safely merge bio with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

static inline bool elv_support_features(unsigned int elv_features,
					unsigned int required_features)
{
	return (required_features & elv_features) == required_features;
}
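
/*
 * Example (illustrative, flag name assumed): with required_features set
 * to ELEVATOR_F_ZBD_SEQ_WRITE, only elevators advertising that bit in
 * their elevator_features match; an elevator advertising no features
 * matches only a zero required_features mask.
 */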

/**
 * elevator_match - Test an elevator name and features
 * @e: Scheduler to test
 * @name: Elevator name to test
 * @required_features: Features that the elevator must provide
 *
 * Return true if the elevator @e name matches @name and if @e provides all
 * the features specified by @required_features.
 */
static bool elevator_match(const struct elevator_type *e, const char *name,
			   unsigned int required_features)
{
	if (!elv_support_features(e->elevator_features, required_features))
		return false;
	if (!strcmp(e->elevator_name, name))
		return true;
	if (e->elevator_alias && !strcmp(e->elevator_alias, name))
		return true;

	return false;
}

/**
 * elevator_find - Find an elevator
 * @name: Name of the elevator to find
 * @required_features: Features that the elevator must provide
 *
 * Return the first registered scheduler with name @name that supports the
 * features @required_features, or NULL if none is found.
 */
static struct elevator_type *elevator_find(const char *name,
					   unsigned int required_features)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (elevator_match(e, name, required_features))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(struct request_queue *q,
					  const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name, q->required_elevator_features);
	if (!e && try_loading) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name, q->required_elevator_features);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);
	return e;
}
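
/*
 * Example (illustrative): elevator_get(q, "bfq", true) first looks for a
 * registered "bfq" elevator and, failing that, asks modprobe for
 * "bfq-iosched" before retrying the lookup, so schedulers built as
 * modules can be loaded on demand.
 */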

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	blk_mq_exit_sched(q, e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
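
/*
 * Example (illustrative): schedulers such as mq-deadline keep a
 * per-direction rb tree of pending requests and use elv_rb_add(),
 * elv_rb_find() and elv_rb_del() to locate front merge candidates by
 * start sector; duplicate start sectors are allowed and are linked to
 * the right by elv_rb_add().
 */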

enum elv_merge elv_merge(struct request_queue *q, struct request **req,
		struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		enum elv_merge ret = blk_try_merge(q->last_merge, bio);

		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;

		if (blk_discard_mergable(__rq))
			return ELEVATOR_DISCARD_MERGE;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.request_merge)
		return e->type->ops.request_merge(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
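
/*
 * Example (illustrative): the merge level is driven from user space via
 * /sys/block/<dev>/queue/nomerges, where 0 enables all merge attempts,
 * 1 keeps only the one-hit cache check (noxmerges) and 2 disables
 * merging entirely (nomerges).
 */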

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}

void elv_merged_request(struct request_queue *q, struct request *rq,
		enum elv_merge type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.request_merged)
		e->type->ops.request_merged(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.requests_merged)
		e->type->ops.requests_merged(q, rq, next);

	elv_rqhash_reposition(q, rq);
	q->last_merge = rq;
}

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.next_request)
		return e->type->ops.next_request(q, rq);

	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.former_request)
		return e->type->ops.former_request(q, rq);

	return NULL;
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q, bool uevent)
{
	struct elevator_queue *e = q->elevator;
	int error;

	lockdep_assert_held(&q->sysfs_lock);

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;

		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		if (uevent)
			kobject_uevent(&e->kobj, KOBJ_ADD);

		e->registered = 1;
	}
	return error;
}

void elv_unregister_queue(struct request_queue *q)
{
	lockdep_assert_held(&q->sysfs_lock);

	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);

		e->registered = 0;
	}
}

int elv_register(struct elevator_type *e)
{
	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name, 0)) {
		spin_unlock(&elv_list_lock);
		kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);

	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
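
/*
 * Example (illustrative sketch, names assumed): a scheduler module
 * typically registers itself from its init hook:
 *
 *	static struct elevator_type example_sched = {
 *		.ops = { ... },
 *		.elevator_name	= "example",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return elv_register(&example_sched);
 *	}
 *
 * Declaring MODULE_ALIAS("example-iosched") then lets elevator_get()
 * load the module on demand via request_module("%s-iosched", name).
 */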

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists. icq's are RCU managed. Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e)
{
	int ret;

	lockdep_assert_held(&q->sysfs_lock);

	if (q->elevator) {
		if (q->elevator->registered)
			elv_unregister_queue(q);

		ioc_clear_queue(q);
		elevator_exit(q, q->elevator);
	}

	ret = blk_mq_init_sched(q, new_e);
	if (ret)
		goto out;

	if (new_e) {
		ret = elv_register_queue(q, true);
		if (ret) {
			elevator_exit(q, q->elevator);
			goto out;
		}
	}

	if (new_e)
		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
	else
		blk_add_trace_msg(q, "elv switch: none");

out:
	return ret;
}

static inline bool elv_support_iosched(struct request_queue *q)
{
	if (!queue_is_mq(q) ||
	    (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
		return false;
	return true;
}

/*
 * For single queue devices, default to using mq-deadline. If we have multiple
 * queues or mq-deadline is not available, default to "none".
 */
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
	if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
		return NULL;

	if (q->nr_hw_queues != 1)
		return NULL;

	return elevator_get(q, "mq-deadline", false);
}

/*
 * Get the first elevator providing the features required by the request queue.
 * Default to "none" if no matching elevator is found.
 */
static struct elevator_type *elevator_get_by_features(struct request_queue *q)
{
	struct elevator_type *e, *found = NULL;

	spin_lock(&elv_list_lock);

	list_for_each_entry(e, &elv_list, list) {
		if (elv_support_features(e->elevator_features,
					 q->required_elevator_features)) {
			found = e;
			break;
		}
	}

	if (found && !try_module_get(found->elevator_owner))
		found = NULL;

	spin_unlock(&elv_list_lock);
	return found;
}

/*
 * For a device queue that has no required features, use the default elevator
 * settings. Otherwise, use the first elevator available matching the required
 * features. If no suitable elevator is found or if the chosen elevator
 * initialization fails, fall back to the "none" elevator (no elevator).
 */
void elevator_init_mq(struct request_queue *q)
{
	struct elevator_type *e;
	int err;

	if (!elv_support_iosched(q))
		return;

	WARN_ON_ONCE(blk_queue_registered(q));

	if (unlikely(q->elevator))
		return;

	if (!q->required_elevator_features)
		e = elevator_get_default(q);
	else
		e = elevator_get_by_features(q);
	if (!e)
		return;

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	err = blk_mq_init_sched(q, e);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	if (err) {
		pr_warn("\"%s\" elevator initialization failed, "
			"falling back to \"none\"\n", e->elevator_name);
		elevator_put(e);
	}
}

/*
 * Switch to new_e io scheduler. Be careful not to introduce deadlocks:
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. This way we have a chance of going back to the
 * old one if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	int err;

	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	err = elevator_switch_mq(q, new_e);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	/* Make sure queue is not in the middle of being removed */
	if (!blk_queue_registered(q))
		return -ENOENT;

	/*
	 * Special case for mq, turn off scheduling
	 */
	if (!strncmp(name, "none", 4)) {
		if (!q->elevator)
			return 0;
		return elevator_switch(q, NULL);
	}

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(q, strstrip(elevator_name), true);
	if (!e)
		return -EINVAL;

	if (q->elevator &&
	    elevator_match(q->elevator->type, elevator_name, 0)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}
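
/*
 * Example (illustrative): from user space the switch is driven through
 * sysfs, e.g.
 *
 *	# echo mq-deadline > /sys/block/sda/queue/scheduler
 *	# echo none > /sys/block/sda/queue/scheduler
 *
 * which ends up in elv_iosched_store() below.
 */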

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!elv_support_iosched(q))
		return count;

	ret = __elevator_change(q, name);
	if (!ret)
		return count;

	return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv = NULL;
	struct elevator_type *__e;
	int len = 0;

	if (!queue_is_mq(q))
		return sprintf(name, "none\n");

	if (!q->elevator)
		len += sprintf(name+len, "[none] ");
	else
		elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (elv && elevator_match(elv, __e->elevator_name, 0)) {
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
			continue;
		}
		if (elv_support_iosched(q) &&
		    elevator_match(__e, __e->elevator_name,
				   q->required_elevator_features))
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

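	/*
	 * "none" is always a valid choice; when a scheduler is active it
	 * was not emitted by the loop above, so append it here (the
	 * bracketed "[none]" form was already printed otherwise).
	 */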
	if (q->elevator)
		len += sprintf(name+len, "none");

	len += sprintf(name+len, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);

static int __init elevator_setup(char *str)
{
	pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
		"Please use sysfs to set IO scheduler for individual devices.\n");
	return 1;
}

__setup("elevator=", elevator_setup);
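
/*
 * Example (illustrative): booting with "elevator=mq-deadline" on the
 * kernel command line now only triggers the warning above; the scheduler
 * is selected per device through /sys/block/<dev>/queue/scheduler instead.
 */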