Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the OrangePi 5/5B/5+ boards.

block/blk-timeout.c: the block layer's generic request timeout handling.

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic timeout handling of requests.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fault-inject.h>

#include "blk.h"
#include "blk-mq.h"

#ifdef CONFIG_FAIL_IO_TIMEOUT

static DECLARE_FAULT_ATTR(fail_io_timeout);

static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);
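
/*
 * The boot parameter takes the common fault-injection format,
 * fail_io_timeout=<interval>,<probability>,<space>,<times>, as parsed
 * by setup_fault_attr() (see Documentation/fault-injection/).
 */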

bool __blk_should_fake_timeout(struct request_queue *q)
{
	return should_fail(&fail_io_timeout, 1);
}
EXPORT_SYMBOL_GPL(__blk_should_fake_timeout);
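
/*
 * Note the leading underscores: callers are expected to go through the
 * blk_should_fake_timeout() inline wrapper in blk.h, which checks that
 * QUEUE_FLAG_FAIL_IO is set on the queue before calling this.
 */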

static int __init fail_io_timeout_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
						NULL, &fail_io_timeout);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_io_timeout_debugfs);
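
/*
 * fault_create_debugfs_attr() exposes the standard fault-injection
 * knobs, typically under /sys/kernel/debug/fail_io_timeout/, so the
 * behaviour can also be tuned at runtime, e.g.:
 *
 *	echo 10 > /sys/kernel/debug/fail_io_timeout/probability
 *	echo -1 > /sys/kernel/debug/fail_io_timeout/times
 */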

ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);

	return sprintf(buf, "%d\n", set != 0);
}

ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	int val;

	if (count) {
		struct request_queue *q = disk->queue;
		char *p = (char *) buf;

		val = simple_strtoul(p, &p, 10);
		if (val)
			blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
		else
			blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
	}

	return count;
}
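
/*
 * These two handlers back a per-disk sysfs attribute (registered as
 * "fail_timeout" in this tree's genhd.c), so fake timeouts can be
 * enabled per device, e.g.:
 *
 *	echo 1 > /sys/block/sda/fail_timeout
 */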

#endif /* CONFIG_FAIL_IO_TIMEOUT */

/**
 * blk_abort_request - Request recovery for the specified command
 * @req:	pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the q's timeout function.
 * LLDDs who implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_request.
 */
void blk_abort_request(struct request *req)
{
	/*
	 * All we need to ensure is that timeout scan takes place
	 * immediately and that scan sees the new timeout value.
	 * No need for fancy synchronizations.
	 */
	WRITE_ONCE(req->deadline, jiffies);
	kblockd_schedule_work(&req->q->timeout_work);
}
EXPORT_SYMBOL_GPL(blk_abort_request);
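
/*
 * Hypothetical use from an LLD's own recovery path: a driver that has
 * given up on a request can force the next timeout scan to treat it as
 * expired immediately:
 *
 *	blk_abort_request(rq);
 *
 * The scheduled timeout_work then sees deadline <= jiffies and invokes
 * the driver's ->timeout() handler for that request.
 */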

static unsigned long blk_timeout_mask __read_mostly;

static int __init blk_timeout_init(void)
{
	blk_timeout_mask = roundup_pow_of_two(HZ) - 1;
	return 0;
}

late_initcall(blk_timeout_init);

/*
 * Just a rough estimate, we don't care about specific values for timeouts.
 */
static inline unsigned long blk_round_jiffies(unsigned long j)
{
	return (j + blk_timeout_mask) + 1;
}
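
/*
 * Worked example, assuming HZ=250: roundup_pow_of_two(250) is 256, so
 * blk_timeout_mask is 255 and blk_round_jiffies(j) returns j + 256.
 * Every expiry is simply pushed out by roughly one second rather than
 * rounded to an exact second boundary.
 */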

unsigned long blk_rq_timeout(unsigned long timeout)
{
	unsigned long maxt;

	maxt = blk_round_jiffies(jiffies + BLK_MAX_TIMEOUT);
	if (time_after(timeout, maxt))
		timeout = maxt;

	return timeout;
}
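
/*
 * This clamps any expiry to at most BLK_MAX_TIMEOUT (5 * HZ in this
 * tree's block/blk.h) plus the rounding above past the current
 * jiffies, so the queue timer still fires every few seconds even when
 * individual requests carry much longer timeouts.
 */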

/**
 * blk_add_timer - Start timeout timer for a single request
 * @req:	request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	/*
	 * Some LLDs, like scsi, peek at the timeout to prevent a
	 * command from being retried forever.
	 */
	if (!req->timeout)
		req->timeout = q->rq_timeout;

	req->rq_flags &= ~RQF_TIMED_OUT;

	expiry = jiffies + req->timeout;
	WRITE_ONCE(req->deadline, expiry);

	/*
	 * If the timer isn't already pending or this timeout is earlier
	 * than an existing one, modify the timer. Round up to next nearest
	 * second.
	 */
	expiry = blk_rq_timeout(blk_round_jiffies(expiry));

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires)) {
		unsigned long diff = q->timeout.expires - expiry;

		/*
		 * Due to added timer slack to group timers, the timer
		 * will often be a little in front of what we asked for.
		 * So apply some tolerance here too, otherwise we keep
		 * modifying the timer because expires for value X
		 * will be X + something.
		 */
		if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
			mod_timer(&q->timeout, expiry);
	}
}
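
/*
 * Worked example of the tolerance check above, assuming HZ=250: with
 * the queue timer pending at T, a new rounded expiry of T - 200
 * (diff = 200 >= HZ / 2 = 125) re-arms the timer earlier, while a new
 * expiry of T - 50 leaves the pending timer alone and the request is
 * simply noticed slightly later than its exact deadline.
 */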