// SPDX-License-Identifier: GPL-2.0

#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/pm_runtime.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

/**
 * blk_pm_runtime_init - Block layer runtime PM initialization routine
 * @q: the queue of the device
 * @dev: the device the queue belongs to
 *
 * Description:
 * Initialize runtime-PM-related fields for @q and start auto suspend for
 * @dev. Drivers that want to take advantage of request-based runtime PM
 * should call this function after @dev has been initialized, its request
 * queue @q has been allocated, and runtime PM cannot happen yet (either
 * because it is disabled/forbidden or because its usage_count > 0). In most
 * cases, drivers should call this function before any I/O has taken place.
 *
 * This function sets up autosuspend for the device; the autosuspend delay
 * is set to -1 to make runtime suspend impossible until an updated value
 * is set either by the user or by the driver. Drivers do not need to touch
 * other autosuspend settings.
 *
 * Block layer runtime PM is request based, so it only works for drivers
 * that use requests as their I/O unit instead of those that use bios
 * directly.
 */
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
	q->dev = dev;
	q->rpm_status = RPM_ACTIVE;
	pm_runtime_set_autosuspend_delay(q->dev, -1);
	pm_runtime_use_autosuspend(q->dev);
}
EXPORT_SYMBOL(blk_pm_runtime_init);

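/*
 * Example (hypothetical driver sketch; the "foo" device and queue below
 * are placeholders, not part of this file): a typical probe path would
 * initialize block layer runtime PM and then pick a real autosuspend
 * delay once the device is ready:
 *
 *	blk_pm_runtime_init(foo->queue, dev);
 *	pm_runtime_set_autosuspend_delay(dev, 5000);
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */
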
/**
 * blk_pre_runtime_suspend - Pre runtime suspend check
 * @q: the queue of the device
 *
 * Description:
 * This function will check if runtime suspend is allowed for the device
 * by examining if there are any requests pending in the queue. If there
 * are requests pending, the device cannot be runtime suspended; otherwise,
 * the queue's status will be updated to SUSPENDING and the driver can
 * proceed to suspend the device.
 *
 * If suspend is not allowed, we mark the device as last busy so that the
 * runtime PM core will try to autosuspend it again some time later.
 *
 * This function should be called near the start of the device's
 * runtime_suspend callback.
 *
 * Return:
 * 0		- OK to runtime suspend the device
 * -EBUSY	- Device should not be runtime suspended
 */
int blk_pre_runtime_suspend(struct request_queue *q)
{
	int ret = 0;

	if (!q->dev)
		return ret;

	WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);

	spin_lock_irq(&q->queue_lock);
	q->rpm_status = RPM_SUSPENDING;
	spin_unlock_irq(&q->queue_lock);

	/*
	 * Increase the pm_only counter before checking whether any
	 * non-PM blk_queue_enter() calls are in progress to prevent new
	 * non-PM blk_queue_enter() calls from succeeding before the
	 * pm_only counter is decreased again.
	 */
	blk_set_pm_only(q);
	ret = -EBUSY;
	/* Switch q_usage_counter from per-cpu to atomic mode. */
	blk_freeze_queue_start(q);
	/*
	 * Wait until atomic mode has been reached. Since that
	 * involves calling call_rcu(), it is guaranteed that later
	 * blk_queue_enter() calls see the pm-only state. See also
	 * http://lwn.net/Articles/573497/.
	 */
	percpu_ref_switch_to_atomic_sync(&q->q_usage_counter);
	if (percpu_ref_is_zero(&q->q_usage_counter))
		ret = 0;
	/* Switch q_usage_counter back to per-cpu mode. */
	blk_mq_unfreeze_queue(q);

	if (ret < 0) {
		spin_lock_irq(&q->queue_lock);
		q->rpm_status = RPM_ACTIVE;
		pm_runtime_mark_last_busy(q->dev);
		spin_unlock_irq(&q->queue_lock);

		blk_clear_pm_only(q);
	}

	return ret;
}
EXPORT_SYMBOL(blk_pre_runtime_suspend);

/**
 * blk_post_runtime_suspend - Post runtime suspend processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_suspend function
 *
 * Description:
 * Update the queue's runtime status according to the return value of the
 * device's runtime suspend function and mark the device as last busy so
 * that the PM core will try to autosuspend the device at a later time.
 *
 * This function should be called near the end of the device's
 * runtime_suspend callback.
 */
void blk_post_runtime_suspend(struct request_queue *q, int err)
{
	if (!q->dev)
		return;

	spin_lock_irq(&q->queue_lock);
	if (!err) {
		q->rpm_status = RPM_SUSPENDED;
	} else {
		q->rpm_status = RPM_ACTIVE;
		pm_runtime_mark_last_busy(q->dev);
	}
	spin_unlock_irq(&q->queue_lock);

	if (err)
		blk_clear_pm_only(q);
}
EXPORT_SYMBOL(blk_post_runtime_suspend);

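/*
 * Example (hypothetical driver sketch; the foo_* names are placeholders):
 * a ->runtime_suspend() callback would bracket its hardware suspend
 * sequence with blk_pre_runtime_suspend() and blk_post_runtime_suspend():
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int err;
 *
 *		err = blk_pre_runtime_suspend(foo->queue);
 *		if (err)
 *			return err;
 *		err = foo_suspend_hardware(foo);
 *		blk_post_runtime_suspend(foo->queue, err);
 *		return err;
 *	}
 */
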
/**
 * blk_pre_runtime_resume - Pre runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 * Update the queue's runtime status to RESUMING in preparation for the
 * runtime resume of the device.
 *
 * This function should be called near the start of the device's
 * runtime_resume callback.
 */
void blk_pre_runtime_resume(struct request_queue *q)
{
	if (!q->dev)
		return;

	spin_lock_irq(&q->queue_lock);
	q->rpm_status = RPM_RESUMING;
	spin_unlock_irq(&q->queue_lock);
}
EXPORT_SYMBOL(blk_pre_runtime_resume);

/**
 * blk_post_runtime_resume - Post runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 * For historical reasons, this routine merely calls blk_set_runtime_active()
 * to do the real work of restarting the queue. It does this regardless of
 * whether the device's runtime-resume succeeded; even if it failed, the
 * driver or error handler will need to communicate with the device.
 *
 * This function should be called near the end of the device's
 * runtime_resume callback.
 */
void blk_post_runtime_resume(struct request_queue *q)
{
	blk_set_runtime_active(q);
}
EXPORT_SYMBOL(blk_post_runtime_resume);

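/*
 * Example (hypothetical driver sketch; the foo_* names are placeholders):
 * the matching ->runtime_resume() callback brackets the hardware resume
 * sequence with blk_pre_runtime_resume() and blk_post_runtime_resume(),
 * calling the latter even on failure so the queue can be restarted for
 * the error handler:
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int err;
 *
 *		blk_pre_runtime_resume(foo->queue);
 *		err = foo_resume_hardware(foo);
 *		blk_post_runtime_resume(foo->queue);
 *		return err;
 *	}
 */
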
/**
 * blk_set_runtime_active - Force runtime status of the queue to be active
 * @q: the queue of the device
 *
 * If the device is left runtime suspended during system suspend, the resume
 * hook typically resumes the device and corrects the runtime status
 * accordingly. However, that does not affect the queue runtime PM status,
 * which is still "suspended". This prevents processing requests from the
 * queue.
 *
 * This function can be used in a driver's resume hook to correct the queue's
 * runtime PM status and re-enable peeking requests from the queue. It should
 * be called before the first request is added to the queue.
 *
 * This function is also called by blk_post_runtime_resume() for runtime
 * resumes. It does everything necessary to restart the queue.
 */
void blk_set_runtime_active(struct request_queue *q)
{
	int old_status;

	if (!q->dev)
		return;

	spin_lock_irq(&q->queue_lock);
	old_status = q->rpm_status;
	q->rpm_status = RPM_ACTIVE;
	pm_runtime_mark_last_busy(q->dev);
	pm_request_autosuspend(q->dev);
	spin_unlock_irq(&q->queue_lock);

	if (old_status != RPM_ACTIVE)
		blk_clear_pm_only(q);
}
EXPORT_SYMBOL(blk_set_runtime_active);
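
/*
 * Example (hypothetical driver sketch; the foo_* names are placeholders):
 * a system ->resume() callback that powers the device back up could
 * realign the runtime PM state with reality and restart the queue:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int err;
 *
 *		err = foo_resume_hardware(foo);
 *		if (err)
 *			return err;
 *		pm_runtime_disable(dev);
 *		pm_runtime_set_active(dev);
 *		pm_runtime_enable(dev);
 *		blk_set_runtime_active(foo->queue);
 *		return 0;
 *	}
 */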