// SPDX-License-Identifier: GPL-2.0-only
/*
 *	scsi_pm.c	Copyright (C) 2010 Alan Stern
 *
 *	SCSI dynamic Power Management
 *		Initial version: Alan Stern <stern@rowland.harvard.edu>
 */

#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/async.h>
#include <linux/blk-pm.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"

#ifdef CONFIG_PM_SLEEP

static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->suspend ? pm->suspend(dev) : 0;
}

static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->freeze ? pm->freeze(dev) : 0;
}

static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->poweroff ? pm->poweroff(dev) : 0;
}

static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->resume ? pm->resume(dev) : 0;
}

static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->thaw ? pm->thaw(dev) : 0;
}

static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->restore ? pm->restore(dev) : 0;
}

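/*
 * Wait for any asynchronous resume still in flight, quiesce the device, and
 * then run the driver's system-sleep callback @cb.  If the callback fails the
 * device is un-quiesced again so that it remains usable.
 */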
static int scsi_dev_type_suspend(struct device *dev,
		int (*cb)(struct device *, const struct dev_pm_ops *))
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int err;

	/* flush pending in-flight resume operations, suspend is synchronous */
	async_synchronize_full_domain(&scsi_sd_pm_domain);

	err = scsi_device_quiesce(to_scsi_device(dev));
	if (err == 0) {
		err = cb(dev, pm);
		if (err)
			scsi_device_resume(to_scsi_device(dev));
	}
	dev_dbg(dev, "scsi suspend: %d\n", err);
	return err;
}

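/*
 * Run the driver's system-resume callback @cb and take the device out of the
 * quiesced state.  On success, the runtime PM status of the device (and, for
 * scsi_device objects, of its request queue) is forced back to "active" so
 * that requests can be processed again.
 */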
static int scsi_dev_type_resume(struct device *dev,
		int (*cb)(struct device *, const struct dev_pm_ops *))
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int err = 0;

	err = cb(dev, pm);
	scsi_device_resume(to_scsi_device(dev));
	dev_dbg(dev, "scsi resume: %d\n", err);

	if (err == 0) {
		pm_runtime_disable(dev);
		err = pm_runtime_set_active(dev);
		pm_runtime_enable(dev);

		/*
		 * Forcibly set runtime PM status of request queue to "active"
		 * to make sure we can again get requests from the queue
		 * (see also blk_pm_peek_request()).
		 *
		 * The resume hook will correct runtime PM status of the disk.
		 */
		if (!err && scsi_is_sdev_device(dev)) {
			struct scsi_device *sdev = to_scsi_device(dev);

			blk_set_runtime_active(sdev->request_queue);
		}
	}

	return err;
}

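/*
 * Common handler for the suspend, freeze and poweroff system-sleep
 * transitions.  Only scsi_device objects get a driver callback here; for
 * other SCSI device types this is a no-op.
 */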
static int
scsi_bus_suspend_common(struct device *dev,
		int (*cb)(struct device *, const struct dev_pm_ops *))
{
	int err = 0;

	if (scsi_is_sdev_device(dev)) {
		/*
		 * All the high-level SCSI drivers that implement runtime
		 * PM treat runtime suspend, system suspend, and system
		 * hibernate nearly identically. In all cases the requirements
		 * for runtime suspension are stricter.
		 */
		if (pm_runtime_suspended(dev))
			return 0;

		err = scsi_dev_type_suspend(dev, cb);
	}

	return err;
}

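/*
 * Resume, thaw and restore work for scsi_device objects runs in the
 * scsi_sd_pm_domain async domain, so several disks can be woken up (and spun
 * up) in parallel.
 */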
static void async_sdev_resume(void *dev, async_cookie_t cookie)
{
	scsi_dev_type_resume(dev, do_scsi_resume);
}

static void async_sdev_thaw(void *dev, async_cookie_t cookie)
{
	scsi_dev_type_resume(dev, do_scsi_thaw);
}

static void async_sdev_restore(void *dev, async_cookie_t cookie)
{
	scsi_dev_type_resume(dev, do_scsi_restore);
}

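/*
 * Common handler for the resume, thaw and restore system-sleep transitions.
 * scsi_device objects are resumed asynchronously (see above); for all other
 * SCSI device types only the runtime PM status is reset to "active".
 */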
static int scsi_bus_resume_common(struct device *dev,
		int (*cb)(struct device *, const struct dev_pm_ops *))
{
	async_func_t fn;

	if (!scsi_is_sdev_device(dev))
		fn = NULL;
	else if (cb == do_scsi_resume)
		fn = async_sdev_resume;
	else if (cb == do_scsi_thaw)
		fn = async_sdev_thaw;
	else if (cb == do_scsi_restore)
		fn = async_sdev_restore;
	else
		fn = NULL;

	if (fn) {
		async_schedule_domain(fn, dev, &scsi_sd_pm_domain);

		/*
		 * If a user has disabled async probing, a likely reason is a
		 * storage enclosure that does not perform staggered spin-up.
		 * For safety, make resume synchronous as well in that case.
		 */
		if (strncmp(scsi_scan_type, "async", 5) != 0)
			async_synchronize_full_domain(&scsi_sd_pm_domain);
	} else {
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}
	return 0;
}

static int scsi_bus_prepare(struct device *dev)
{
	if (scsi_is_host_device(dev)) {
		/* Wait until async scanning is finished */
		scsi_complete_async_scans();
	}
	return 0;
}

static int scsi_bus_suspend(struct device *dev)
{
	return scsi_bus_suspend_common(dev, do_scsi_suspend);
}

static int scsi_bus_resume(struct device *dev)
{
	return scsi_bus_resume_common(dev, do_scsi_resume);
}

static int scsi_bus_freeze(struct device *dev)
{
	return scsi_bus_suspend_common(dev, do_scsi_freeze);
}

static int scsi_bus_thaw(struct device *dev)
{
	return scsi_bus_resume_common(dev, do_scsi_thaw);
}

static int scsi_bus_poweroff(struct device *dev)
{
	return scsi_bus_suspend_common(dev, do_scsi_poweroff);
}

static int scsi_bus_restore(struct device *dev)
{
	return scsi_bus_resume_common(dev, do_scsi_restore);
}

#else /* CONFIG_PM_SLEEP */

#define scsi_bus_prepare	NULL
#define scsi_bus_suspend	NULL
#define scsi_bus_resume		NULL
#define scsi_bus_freeze		NULL
#define scsi_bus_thaw		NULL
#define scsi_bus_poweroff	NULL
#define scsi_bus_restore	NULL

#endif /* CONFIG_PM_SLEEP */

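/*
 * Runtime-suspend a scsi_device: ask the block layer whether the request
 * queue can be suspended, run the driver's runtime_suspend callback, and
 * report the result back to the block layer.
 */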
static int sdev_runtime_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	struct scsi_device *sdev = to_scsi_device(dev);
	int err = 0;

	err = blk_pre_runtime_suspend(sdev->request_queue);
	if (err)
		return err;
	if (pm && pm->runtime_suspend)
		err = pm->runtime_suspend(dev);
	blk_post_runtime_suspend(sdev->request_queue, err);

	return err;
}

static int scsi_runtime_suspend(struct device *dev)
{
	int err = 0;

	dev_dbg(dev, "scsi_runtime_suspend\n");
	if (scsi_is_sdev_device(dev))
		err = sdev_runtime_suspend(dev);

	/* Insert hooks here for targets, hosts, and transport classes */

	return err;
}

static int sdev_runtime_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int err = 0;

	blk_pre_runtime_resume(sdev->request_queue);
	if (pm && pm->runtime_resume)
		err = pm->runtime_resume(dev);
	blk_post_runtime_resume(sdev->request_queue);

	return err;
}

static int scsi_runtime_resume(struct device *dev)
{
	int err = 0;

	dev_dbg(dev, "scsi_runtime_resume\n");
	if (scsi_is_sdev_device(dev))
		err = sdev_runtime_resume(dev);

	/* Insert hooks here for targets, hosts, and transport classes */

	return err;
}

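/*
 * For scsi_device objects an autosuspend is requested instead of an immediate
 * suspend; returning -EBUSY tells the PM core that the idle notification has
 * already been handled.
 */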
static int scsi_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "scsi_runtime_idle\n");

	/* Insert hooks here for targets, hosts, and transport classes */

	if (scsi_is_sdev_device(dev)) {
		pm_runtime_mark_last_busy(dev);
		pm_runtime_autosuspend(dev);
		return -EBUSY;
	}

	return 0;
}

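/*
 * scsi_autopm_get_device - resume @sdev and pin it in the runtime-active
 * state.  The resume is synchronous; -EACCES (runtime PM disallowed for the
 * device) is treated as success, and on any other failure the usage count is
 * dropped again.  Returns 0 on success or a negative errno.
 */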
int scsi_autopm_get_device(struct scsi_device *sdev)
{
	int	err;

	err = pm_runtime_get_sync(&sdev->sdev_gendev);
	if (err < 0 && err != -EACCES)
		pm_runtime_put_sync(&sdev->sdev_gendev);
	else
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(scsi_autopm_get_device);

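/*
 * scsi_autopm_put_device - drop the usage count taken by
 * scsi_autopm_get_device(), allowing @sdev to be runtime suspended again.
 */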
void scsi_autopm_put_device(struct scsi_device *sdev)
{
	pm_runtime_put_sync(&sdev->sdev_gendev);
}
EXPORT_SYMBOL_GPL(scsi_autopm_put_device);

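/*
 * Target variants: raise or drop the usage count of the scsi_target,
 * resuming or idling it synchronously.  Unlike the device helpers these are
 * not exported and any resume error is ignored.
 */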
void scsi_autopm_get_target(struct scsi_target *starget)
{
	pm_runtime_get_sync(&starget->dev);
}

void scsi_autopm_put_target(struct scsi_target *starget)
{
	pm_runtime_put_sync(&starget->dev);
}

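/*
 * Host variants: same pattern as the scsi_device helpers, operating on the
 * Scsi_Host's shost_gendev.
 */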
int scsi_autopm_get_host(struct Scsi_Host *shost)
{
	int	err;

	err = pm_runtime_get_sync(&shost->shost_gendev);
	if (err < 0 && err != -EACCES)
		pm_runtime_put_sync(&shost->shost_gendev);
	else
		err = 0;
	return err;
}

void scsi_autopm_put_host(struct Scsi_Host *shost)
{
	pm_runtime_put_sync(&shost->shost_gendev);
}

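/*
 * PM operations for devices on the SCSI bus.  The system-sleep callbacks fall
 * back to NULL when CONFIG_PM_SLEEP is disabled; the runtime PM callbacks are
 * always available.
 */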
const struct dev_pm_ops scsi_bus_pm_ops = {
	.prepare =		scsi_bus_prepare,
	.suspend =		scsi_bus_suspend,
	.resume =		scsi_bus_resume,
	.freeze =		scsi_bus_freeze,
	.thaw =			scsi_bus_thaw,
	.poweroff =		scsi_bus_poweroff,
	.restore =		scsi_bus_restore,
	.runtime_suspend =	scsi_runtime_suspend,
	.runtime_resume =	scsi_runtime_resume,
	.runtime_idle =		scsi_runtime_idle,
};