^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Functions related to setting various queue properties from drivers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/bio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/blk-mq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/sched/sysctl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include "blk.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include "blk-mq-sched.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * blk_end_sync_rq - executes a completion event on a request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * @rq: request to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * @error: end I/O status of the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) static void blk_end_sync_rq(struct request *rq, blk_status_t error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) struct completion *waiting = rq->end_io_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) rq->end_io_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * complete last, if this is a stack request the process (and thus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * the rq pointer) could be invalid right after this complete()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) complete(waiting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q: queue to insert the request in
 * @bd_disk: matching gendisk; stored in @rq->rq_disk for accounting
 * @rq: request to insert (must be a fully prepared passthrough request)
 * @at_head: insert request at head or tail of queue
 * @done: I/O completion handler, invoked when the request finishes
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution. Don't wait for completion.
 *
 * Note:
 *    NOTE(review): an earlier version of this comment claimed @done is
 *    invoked directly if the queue is dead; the code below shows no such
 *    path — the request is unconditionally handed to the blk-mq scheduler.
 *    Presumably the caller must hold a reference on @q — confirm against
 *    callers.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	/* Callers may sleep below; passthrough is the only supported case. */
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	/* Start accounting before the request becomes visible to the queue. */
	blk_account_io_start(rq);

	/*
	 * don't check dying flag for MQ because the request won't
	 * be reused after dying flag is set
	 */
	blk_mq_sched_insert_request(rq, at_head, true, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * blk_execute_rq - insert a request into queue for execution
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * @q: queue to insert the request in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * @bd_disk: matching gendisk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * @rq: request to insert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * @at_head: insert request at head or tail of queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * Insert a fully prepared request at the back of the I/O scheduler queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * for execution and wait for completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) void blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) struct request *rq, int at_head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) DECLARE_COMPLETION_ONSTACK(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) unsigned long hang_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) rq->end_io_data = &wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) /* Prevent hang_check timer from firing at us during very long I/O */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) hang_check = sysctl_hung_task_timeout_secs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) if (hang_check)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) wait_for_completion_io(&wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) EXPORT_SYMBOL(blk_execute_rq);