// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 */
#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
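
/*
 * Illustrative sketch (not part of the upstream file): RPM_GET_CALLBACK()
 * resolves a callback by walking the PM domain -> type -> class -> bus
 * hierarchy and falling back to the driver's own dev_pm_ops, e.g.:
 *
 *	int (*cb)(struct device *);
 *
 *	cb = RPM_GET_CALLBACK(dev, runtime_suspend);
 *	if (cb)
 *		cb(dev);	// the same callback rpm_suspend() would run
 */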

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
static void update_pm_runtime_accounting(struct device *dev)
{
	u64 now, last, delta;

	if (dev->power.disable_depth > 0)
		return;

	last = dev->power.accounting_timestamp;

	now = ktime_get_mono_fast_ns();
	dev->power.accounting_timestamp = now;

	/*
	 * Because ktime_get_mono_fast_ns() is not monotonic during
	 * timekeeping updates, ensure that 'now' is after the last saved
	 * timestamp.
	 */
	if (now < last)
		return;

	delta = now - last;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_time += delta;
	else
		dev->power.active_time += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
	u64 time;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	update_pm_runtime_accounting(dev);
	time = suspended ? dev->power.suspended_time : dev->power.active_time;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return time;
}

u64 pm_runtime_active_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, false);
}

u64 pm_runtime_suspended_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
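
/*
 * Illustrative sketch (not part of the upstream file): both accessors
 * return nanoseconds, so a consumer such as a power-monitoring driver
 * might derive a suspended-time percentage roughly like this:
 *
 *	u64 active = pm_runtime_active_time(dev);
 *	u64 suspended = pm_runtime_suspended_time(dev);
 *	u64 total = active + suspended;
 *	u64 percent = total ? div64_u64(100 * suspended, total) : 0;
 */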

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		hrtimer_try_to_cancel(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time. If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	u64 expires;

	if (!dev->power.use_autosuspend)
		return 0;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		return 0;

	expires = READ_ONCE(dev->power.last_busy);
	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires > ktime_get_mono_fast_ns())
		return expires;	/* Expires in the future */

	return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
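
/*
 * Illustrative worked example (not part of the upstream file): with an
 * autosuspend_delay of 2000 ms and power.last_busy == T ns, the expiration is
 *
 *	expires = T + 2000 * NSEC_PER_MSEC = T + 2,000,000,000 ns
 *
 * so the function returns T + 2e9 as long as the current monotonic time is
 * still below that value, and 0 once the delay has fully elapsed.
 */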

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * This function should only be called by block device or network device
 * drivers, to solve the deadlock problem during runtime resume/suspend:
 *
 * If a memory allocation with GFP_KERNEL is made inside the runtime
 * resume/suspend callback of any one of the device's ancestors (or of the
 * block device itself), a deadlock may be triggered inside the memory
 * allocation, since it might not complete until the block device becomes
 * active and the involved page I/O finishes. This situation was first
 * pointed out by Alan Stern. Network devices are involved in iSCSI-type
 * situations.
 *
 * The dev_hotplug_mutex lock is held in this function to handle the
 * hotplug race, because pm_runtime_set_memalloc_noio() may be called
 * from an async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* Hold the power lock since the bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable the ancestors any more if the device
		 * itself has already been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if none of its
		 * children has the flag set, because an ancestor's flag
		 * may have been set by any one of its descendants.
		 */
		if (!dev || (!enable &&
		    device_for_each_child(dev, NULL, dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
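
/*
 * Illustrative sketch (not part of the upstream file): a block or network
 * driver would typically set the flag right after registering the device
 * and clear it before unregistering, e.g. (the foo_* names are made up):
 *
 *	ret = device_add(&foo->dev);
 *	if (!ret)
 *		pm_runtime_set_memalloc_noio(&foo->dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(&foo->dev, false);
 *	device_del(&foo->dev);
 */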

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!dev->power.ignore_children &&
		 atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_resume_latency(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held()) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME))
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		refcount_inc(&link->rpm_active);
	}
	return 0;
}

/**
 * pm_runtime_release_supplier - Drop references to device link's supplier.
 * @link: Target device link.
 * @check_idle: Whether or not to check if the supplier device is idle.
 *
 * Drop all runtime PM references associated with @link to its supplier device
 * and if @check_idle is set, check if that device is idle (and so it can be
 * suspended).
 */
void pm_runtime_release_supplier(struct device_link *link, bool check_idle)
{
	struct device *supplier = link->supplier;

	/*
	 * The additional power.usage_count check is a safety net in case
	 * the rpm_active refcount becomes saturated, in which case
	 * refcount_dec_not_one() would return true forever, but it is not
	 * strictly necessary.
	 */
	while (refcount_dec_not_one(&link->rpm_active) &&
	       atomic_read(&supplier->power.usage_count) > 0)
		pm_runtime_put_noidle(supplier);

	if (check_idle)
		pm_request_idle(supplier);
}
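
/*
 * Illustrative sketch (not part of the upstream file): the rpm_active
 * refcount balanced here is taken when a consumer with a DL_FLAG_PM_RUNTIME
 * link resumes. Such a link would be created with, for instance (both
 * device pointers are placeholders):
 *
 *	link = device_link_add(consumer_dev, supplier_dev,
 *			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
 *
 * after which rpm_get_suppliers()/__rpm_put_suppliers() keep the supplier's
 * usage count in step with the consumer's resume/suspend.
 */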

static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		pm_runtime_release_supplier(link, try_to_suspend);
}

static void rpm_put_suppliers(struct device *dev)
{
	__rpm_put_suppliers(dev, true);
}

static void rpm_suspend_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		pm_request_idle(link->supplier);

	device_links_read_unlock(idx);
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval) {
				rpm_put_suppliers(dev);
				goto fail;
			}

			device_links_read_unlock(idx);
		}
	}

	retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links
		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

			__rpm_put_suppliers(dev, false);

fail:
			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended. If
 * another idle notification has been started earlier, return immediately. If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
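
/*
 * Illustrative sketch (not part of the upstream file): per the contract
 * above, a driver's ->runtime_idle() can veto or allow the follow-up
 * suspend. A hypothetical driver callback might look like:
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		struct foo_priv *foo = dev_get_drvdata(dev);
 *
 *		if (foo->transfer_in_flight)
 *			return -EBUSY;	// stay active, no suspend attempt
 *
 *		return 0;	// lets rpm_idle() call rpm_suspend(dev, RPM_AUTO)
 *	}
 */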

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * A deadlock might occur if a memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend or
		 * runtime_resume callback of a block device's ancestor
		 * or of the block device itself. A network device may
		 * be regarded as part of an iSCSI block device, so
		 * network devices and their ancestors should be marked
		 * memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend. If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly. If
 * ->runtime_suspend succeeded and a deferred resume was requested while
 * the callback was running, carry out the resume; otherwise send an idle
 * notification to the device's parent (provided the suspend succeeded and
 * neither ignore_children of parent->power nor irq_safe of dev->power is
 * set). If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the
 * RPM_AUTO flag is set and the next autosuspend-delay expiration time is
 * in the future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

repeat:
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		goto out;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		u64 expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires &&
			    dev->power.timer_expires <= expires)) {
				/*
				 * We add a slack of 25% to gather wakeups
				 * without sacrificing the granularity.
				 */
				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
						    (NSEC_PER_MSEC >> 2);

				dev->power.timer_expires = expires;
				hrtimer_start_range_ns(&dev->power.suspend_timer,
						       ns_to_ktime(expires),
						       slack,
						       HRTIMER_MODE_ABS);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	if (dev->power.irq_safe)
		goto out;

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}
	/* Maybe the suppliers are now able to suspend. */
	if (dev->power.links_count > 0) {
		spin_unlock_irq(&dev->power.lock);

		rpm_suspend_suppliers(dev);

		spin_lock_irq(&dev->power.lock);
	}

out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

fail:
	dev_pm_disable_wake_irq_check(dev);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}
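
/*
 * Illustrative sketch (not part of the upstream file): the RPM_AUTO path
 * above is what a driver opts into with the usual autosuspend idiom
 * (the 2000 ms delay is an arbitrary example value):
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	...
 *	// after each I/O burst:
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 *
 * which reaches this function with RPM_AUTO | RPM_ASYNC once the usage
 * count drops to zero.
 */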

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed. Cancel
 * any scheduled or pending requests. If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly. Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) || dev->parent->power.ignore_children
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) || dev->parent->power.runtime_status == RPM_ACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) atomic_inc(&dev->parent->power.child_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) spin_unlock(&dev->parent->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) retval = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) goto no_callback; /* Assume success. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) spin_unlock(&dev->parent->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* Carry out an asynchronous or a synchronous resume. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (rpmflags & RPM_ASYNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) dev->power.request = RPM_REQ_RESUME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (!dev->power.request_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) dev->power.request_pending = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) queue_work(pm_wq, &dev->power.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (!parent && dev->parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * Increment the parent's usage counter and resume it if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * necessary. Not needed if dev is irq-safe; then the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * parent is permanently resumed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) parent = dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (dev->power.irq_safe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) goto skip_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) spin_unlock(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) pm_runtime_get_noresume(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) spin_lock(&parent->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * Resume the parent if it has runtime PM enabled and has not been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * set to ignore its children.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (!parent->power.disable_depth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) && !parent->power.ignore_children) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) rpm_resume(parent, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (parent->power.runtime_status != RPM_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) retval = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) spin_unlock(&parent->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) spin_lock(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) skip_parent:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (dev->power.no_callbacks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) goto no_callback; /* Assume success. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) __update_runtime_status(dev, RPM_RESUMING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) callback = RPM_GET_CALLBACK(dev, runtime_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) dev_pm_disable_wake_irq_check(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) retval = rpm_callback(callback, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) __update_runtime_status(dev, RPM_SUSPENDED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) pm_runtime_cancel_pending(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) dev_pm_enable_wake_irq_check(dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) no_callback:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) __update_runtime_status(dev, RPM_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) pm_runtime_mark_last_busy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) atomic_inc(&parent->power.child_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) wake_up_all(&dev->power.wait_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (retval >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) rpm_idle(dev, RPM_ASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (parent && !dev->power.irq_safe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) pm_runtime_put(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) spin_lock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
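
/*
 * Note: drivers never call rpm_resume() directly (it is static); they go
 * through the wrappers in include/linux/pm_runtime.h, which select the flag
 * combinations handled above.  A brief sketch of that mapping:
 *
 *      pm_runtime_resume(dev);   // __pm_runtime_resume(dev, 0)
 *      pm_request_resume(dev);   // __pm_runtime_resume(dev, RPM_ASYNC)
 *      pm_runtime_get(dev);      // __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC)
 *      pm_runtime_get_sync(dev); // __pm_runtime_resume(dev, RPM_GET_PUT)
 */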
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * pm_runtime_work - Universal runtime PM work function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * @work: Work structure used for scheduling the execution of this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * Use @work to get the device object the work is to be done for, determine what
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * is to be done and execute the appropriate runtime PM function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) static void pm_runtime_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct device *dev = container_of(work, struct device, power.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) enum rpm_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) spin_lock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (!dev->power.request_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) req = dev->power.request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) dev->power.request = RPM_REQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) dev->power.request_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) switch (req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) case RPM_REQ_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) case RPM_REQ_IDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) rpm_idle(dev, RPM_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) case RPM_REQ_SUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) rpm_suspend(dev, RPM_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) case RPM_REQ_AUTOSUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) case RPM_REQ_RESUME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) rpm_resume(dev, RPM_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * @data: Device pointer passed by pm_schedule_suspend().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * Check if the time is right and queue a suspend request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct device *dev = container_of(timer, struct device, power.suspend_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) u64 expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) spin_lock_irqsave(&dev->power.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) expires = dev->power.timer_expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * If 'expires' is after the current time, we've been called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * too early.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) dev->power.timer_expires = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) rpm_suspend(dev, dev->power.timer_autosuspends ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) spin_unlock_irqrestore(&dev->power.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return HRTIMER_NORESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * pm_schedule_suspend - Set up a timer to submit a suspend request in the future.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * @dev: Device to suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * @delay: Time to wait before submitting a suspend request, in milliseconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) int pm_schedule_suspend(struct device *dev, unsigned int delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) u64 expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) spin_lock_irqsave(&dev->power.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (!delay) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) retval = rpm_suspend(dev, RPM_ASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) retval = rpm_check_suspend_allowed(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /* Other scheduled or pending requests need to be canceled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) pm_runtime_cancel_pending(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) dev->power.timer_expires = expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) dev->power.timer_autosuspends = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) spin_unlock_irqrestore(&dev->power.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) EXPORT_SYMBOL_GPL(pm_schedule_suspend);
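
/*
 * Usage sketch (illustrative only; the foo_io_done() helper below is
 * hypothetical): a driver that expects its device to stay idle for a while
 * can schedule a delayed suspend request instead of suspending right away:
 *
 *      static void foo_io_done(struct device *dev)
 *      {
 *              int ret = pm_schedule_suspend(dev, 5000);  // 5 s delay
 *
 *              if (ret < 0)
 *                      dev_warn(dev, "cannot schedule suspend: %d\n", ret);
 *      }
 *
 * Most drivers use the autosuspend framework (pm_runtime_put_autosuspend())
 * instead, which re-arms the delay automatically.
 */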
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * __pm_runtime_idle - Entry point for runtime idle operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * @dev: Device to send idle notification for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * @rpmflags: Flag bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * If the RPM_GET_PUT flag is set, decrement the device's usage count and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * return immediately if it is still larger than zero. Then carry out an idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * notification, either synchronous or asynchronous.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * This routine may be called in atomic context if the RPM_ASYNC flag is set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * or if pm_runtime_irq_safe() has been called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) int __pm_runtime_idle(struct device *dev, int rpmflags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (rpmflags & RPM_GET_PUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (!atomic_dec_and_test(&dev->power.usage_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) trace_rpm_usage_rcuidle(dev, rpmflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) spin_lock_irqsave(&dev->power.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) retval = rpm_idle(dev, rpmflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) spin_unlock_irqrestore(&dev->power.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) EXPORT_SYMBOL_GPL(__pm_runtime_idle);
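
/*
 * The pm_runtime.h wrappers pick the flags for this entry point:
 * pm_runtime_idle(dev) passes 0 (synchronous), pm_request_idle(dev) passes
 * RPM_ASYNC, and pm_runtime_put(dev) passes RPM_GET_PUT | RPM_ASYNC.  A
 * balanced get/put pair in a driver might look like this (sketch only):
 *
 *      ret = pm_runtime_get_sync(dev);         // resume, bump usage count
 *      if (ret < 0) {
 *              pm_runtime_put_noidle(dev);     // drop count, skip idle check
 *              return ret;
 *      }
 *      ... program the hardware ...
 *      pm_runtime_put(dev);                    // drop count, async idle check
 */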
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * @dev: Device to suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * @rpmflags: Flag bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * If the RPM_GET_PUT flag is set, decrement the device's usage count and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * return immediately if it is still larger than zero. Then carry out a suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * either synchronous or asynchronous.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * This routine may be called in atomic context if the RPM_ASYNC flag is set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * or if pm_runtime_irq_safe() has been called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) int __pm_runtime_suspend(struct device *dev, int rpmflags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (rpmflags & RPM_GET_PUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (!atomic_dec_and_test(&dev->power.usage_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) trace_rpm_usage_rcuidle(dev, rpmflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) spin_lock_irqsave(&dev->power.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) retval = rpm_suspend(dev, rpmflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) spin_unlock_irqrestore(&dev->power.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
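
/*
 * With autosuspend in use, the usual "done with the device" path reaches
 * this entry point via pm_runtime_put_autosuspend(), i.e.
 * __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO):
 *
 *      pm_runtime_mark_last_busy(dev);
 *      pm_runtime_put_autosuspend(dev);
 *
 * which restarts the autosuspend delay instead of suspending immediately.
 */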
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * __pm_runtime_resume - Entry point for runtime resume operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * @dev: Device to resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * @rpmflags: Flag bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * carry out a resume, either synchronous or asynchronous.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) * This routine may be called in atomic context if the RPM_ASYNC flag is set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * or if pm_runtime_irq_safe() has been called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) int __pm_runtime_resume(struct device *dev, int rpmflags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) dev->power.runtime_status != RPM_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (rpmflags & RPM_GET_PUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) atomic_inc(&dev->power.usage_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) spin_lock_irqsave(&dev->power.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) retval = rpm_resume(dev, rpmflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) spin_unlock_irqrestore(&dev->power.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) EXPORT_SYMBOL_GPL(__pm_runtime_resume);
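
/*
 * Error-handling sketch for the synchronous resume path: note that
 * pm_runtime_get_sync() leaves the usage counter incremented even on
 * failure, so a failing call must still be balanced.  The
 * pm_runtime_resume_and_get() wrapper folds the two steps together:
 *
 *      ret = pm_runtime_resume_and_get(dev);
 *      if (ret < 0)
 *              return ret;     // usage counter already dropped
 *      ... talk to the device ...
 *      pm_runtime_put(dev);
 */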
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * pm_runtime_get_if_active - Conditionally bump up device usage counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * @ign_usage_count: Whether or not to look at the current usage counter value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * Return -EINVAL if runtime PM is disabled for @dev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * without changing the usage counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * If @ign_usage_count is %true, this function can be used to prevent suspending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * the device when its runtime PM status is %RPM_ACTIVE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * If @ign_usage_count is %false, this function can be used to prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * runtime PM usage counter is not zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * The caller is responsible for decrementing the runtime PM usage counter of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * @dev after this function has returned a positive value for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) spin_lock_irqsave(&dev->power.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (dev->power.disable_depth > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) } else if (dev->power.runtime_status != RPM_ACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) } else if (ign_usage_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) retval = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) atomic_inc(&dev->power.usage_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) retval = atomic_inc_not_zero(&dev->power.usage_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) trace_rpm_usage_rcuidle(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) spin_unlock_irqrestore(&dev->power.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
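
/*
 * Usage sketch: touch the hardware only if it is already powered up,
 * without waking it otherwise (e.g. to sample a statistics register):
 *
 *      if (pm_runtime_get_if_active(dev, true) > 0) {
 *              ... read the registers ...
 *              pm_runtime_put(dev);
 *      }
 *
 * With @ign_usage_count equal to %false this is what the
 * pm_runtime_get_if_in_use() wrapper does.
 */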
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * __pm_runtime_set_status - Set runtime PM status of a device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * @status: New runtime PM status of the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * If runtime PM of the device is disabled or its power.runtime_error field is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * different from zero, the status may be changed either to RPM_ACTIVE or to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * RPM_SUSPENDED, as long as that reflects the actual state of the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * However, if the device has a parent and the parent is not active, and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) * parent's power.ignore_children flag is unset, the device's status cannot be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) * set to RPM_ACTIVE, so -EBUSY is returned in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * If successful, __pm_runtime_set_status() clears the power.runtime_error field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * and the device parent's counter of unsuspended children is modified to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * reflect the new status. If the new status is RPM_SUSPENDED, an idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * notification request for the parent is submitted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * If @dev has any suppliers (as reflected by device links to them), and @status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) * is RPM_ACTIVE, they will be activated upfront and if the activation of one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * of the @status value) and the suppliers will be deactivated on exit. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * error returned by the failing supplier activation will be returned in that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) int __pm_runtime_set_status(struct device *dev, unsigned int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) struct device *parent = dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) bool notify_parent = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) spin_lock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * Prevent PM-runtime from being enabled for the device or return an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * error if it is enabled already and working.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (dev->power.runtime_error || dev->power.disable_depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) dev->power.disable_depth++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) error = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * If the new status is RPM_ACTIVE, the suppliers can be activated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * upfront regardless of the current status, because next time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * rpm_put_suppliers() runs, the rpm_active refcounts of the links
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * involved will be dropped down to one anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (status == RPM_ACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) int idx = device_links_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) error = rpm_get_suppliers(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) status = RPM_SUSPENDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) device_links_read_unlock(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) spin_lock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (dev->power.runtime_status == status || !parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) goto out_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (status == RPM_SUSPENDED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) atomic_add_unless(&parent->power.child_count, -1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) notify_parent = !parent->power.ignore_children;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) * It is invalid to put an active child under a parent that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) * not active, has runtime PM enabled and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) * 'power.ignore_children' flag unset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (!parent->power.disable_depth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) && !parent->power.ignore_children
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) && parent->power.runtime_status != RPM_ACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) dev_name(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) dev_name(parent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) error = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) } else if (dev->power.runtime_status == RPM_SUSPENDED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) atomic_inc(&parent->power.child_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) spin_unlock(&parent->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) status = RPM_SUSPENDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) out_set:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) __update_runtime_status(dev, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) dev->power.runtime_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (notify_parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) pm_request_idle(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (status == RPM_SUSPENDED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) int idx = device_links_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) rpm_put_suppliers(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) device_links_read_unlock(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
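
/*
 * Typical probe-time usage goes through the pm_runtime_set_active() and
 * pm_runtime_set_suspended() wrappers to declare the initial power state
 * before runtime PM is enabled (sketch):
 *
 *      // firmware left the device powered on:
 *      pm_runtime_set_active(dev);     // __pm_runtime_set_status(dev, RPM_ACTIVE)
 *      pm_runtime_enable(dev);
 */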
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * __pm_runtime_barrier - Cancel pending requests and wait for completions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * Flush all pending requests for the device from pm_wq and wait for all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * runtime PM operations involving the device in progress to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * Should be called under dev->power.lock with interrupts disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) static void __pm_runtime_barrier(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) pm_runtime_deactivate_timer(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (dev->power.request_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) dev->power.request = RPM_REQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) cancel_work_sync(&dev->power.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) spin_lock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) dev->power.request_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (dev->power.runtime_status == RPM_SUSPENDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) || dev->power.runtime_status == RPM_RESUMING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) || dev->power.idle_notification) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) DEFINE_WAIT(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) /* Suspend, wake-up or idle notification in progress. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) prepare_to_wait(&dev->power.wait_queue, &wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (dev->power.runtime_status != RPM_SUSPENDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) && dev->power.runtime_status != RPM_RESUMING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) && !dev->power.idle_notification)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) spin_lock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) finish_wait(&dev->power.wait_queue, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * pm_runtime_barrier - Flush pending requests and wait for completions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) * Prevent the device from being suspended by incrementing its usage counter and,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) * if there's a pending resume request for the device, wake the device up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * Next, make sure that all pending requests for the device have been flushed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) * from pm_wq and wait for all runtime PM operations involving the device in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) * progress to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * 1, if there was a resume request pending and the device had to be woken up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * 0, otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) int pm_runtime_barrier(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) int retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) pm_runtime_get_noresume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) spin_lock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (dev->power.request_pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) && dev->power.request == RPM_REQ_RESUME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) rpm_resume(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) retval = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) __pm_runtime_barrier(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) pm_runtime_put_noidle(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) EXPORT_SYMBOL_GPL(pm_runtime_barrier);
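
/*
 * Usage sketch: before tearing down resources that the runtime PM callbacks
 * depend on (say, on driver unbind), make sure no request or callback is
 * still in flight:
 *
 *      pm_runtime_barrier(dev);
 *      ... free IRQs, unmap registers, etc. ...
 *
 * The driver core itself uses this barrier around probing and in the system
 * suspend prepare phase.
 */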
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * __pm_runtime_disable - Disable runtime PM of a device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * @check_resume: If set, check if there's a resume request for the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * Increment power.disable_depth for the device and if it was zero previously,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * cancel all pending runtime PM requests for the device and wait for all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) * operations in progress to complete. The device can be either active or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) * suspended after its runtime PM has been disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) * If @check_resume is set and there's a resume request pending when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * __pm_runtime_disable() is called and power.disable_depth is zero, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * function will wake up the device before disabling its runtime PM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) void __pm_runtime_disable(struct device *dev, bool check_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) spin_lock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (dev->power.disable_depth > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) dev->power.disable_depth++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) * Wake up the device if there's a resume request pending, because that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * means there probably is some I/O to process and disabling runtime PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * shouldn't prevent the device from processing the I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (check_resume && dev->power.request_pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) && dev->power.request == RPM_REQ_RESUME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * Prevent suspends and idle notifications from being carried
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) * out after we have woken up the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) pm_runtime_get_noresume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) rpm_resume(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) pm_runtime_put_noidle(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) /* Update time accounting before disabling PM-runtime. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) update_pm_runtime_accounting(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (!dev->power.disable_depth++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) __pm_runtime_barrier(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) EXPORT_SYMBOL_GPL(__pm_runtime_disable);
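
/*
 * Drivers normally call this through the pm_runtime_disable(dev) wrapper,
 * which passes @check_resume = true.  A balanced remove() path might be
 * (sketch):
 *
 *      pm_runtime_disable(dev);        // undo probe's pm_runtime_enable()
 *      pm_runtime_set_suspended(dev);  // if the device is powered off now
 */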
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * pm_runtime_enable - Enable runtime PM of a device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) void pm_runtime_enable(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) spin_lock_irqsave(&dev->power.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (dev->power.disable_depth > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) dev->power.disable_depth--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) /* About to enable runtime PM, set accounting_timestamp to now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (!dev->power.disable_depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) dev_warn(dev, "Unbalanced %s!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) WARN(!dev->power.disable_depth &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) dev->power.runtime_status == RPM_SUSPENDED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) !dev->power.ignore_children &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) atomic_read(&dev->power.child_count) > 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) "Enabling runtime PM for inactive device (%s) with active children\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) spin_unlock_irqrestore(&dev->power.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) EXPORT_SYMBOL_GPL(pm_runtime_enable);
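
/*
 * Calls to pm_runtime_enable() and pm_runtime_disable() must balance.  A
 * common pairing (sketch; error handling elided):
 *
 *      // probe:
 *      pm_runtime_set_active(dev);
 *      pm_runtime_enable(dev);
 *
 *      // remove:
 *      pm_runtime_disable(dev);
 *
 * devm_pm_runtime_enable() can be used instead to have the disable issued
 * automatically on driver detach.
 */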
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) * pm_runtime_forbid - Block runtime PM of a device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) * Increase the device's usage count and clear its power.runtime_auto flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) * so that it cannot be suspended at run time until pm_runtime_allow() is called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) * for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) void pm_runtime_forbid(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) spin_lock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (!dev->power.runtime_auto)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) dev->power.runtime_auto = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) atomic_inc(&dev->power.usage_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) rpm_resume(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) EXPORT_SYMBOL_GPL(pm_runtime_forbid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) * pm_runtime_allow - Unblock runtime PM of a device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) * Decrease the device's usage count and set its power.runtime_auto flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) void pm_runtime_allow(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) spin_lock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (dev->power.runtime_auto)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) dev->power.runtime_auto = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (atomic_dec_and_test(&dev->power.usage_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) EXPORT_SYMBOL_GPL(pm_runtime_allow);
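
/*
 * pm_runtime_forbid() and pm_runtime_allow() implement the userspace
 * "control" sysfs attribute: writing "on" forbids runtime suspend and
 * writing "auto" allows it again.  A driver that wants runtime PM to
 * default to off but remain user-selectable can do (sketch):
 *
 *      pm_runtime_forbid(dev);         // default the control knob to "on"
 *      pm_runtime_enable(dev);
 */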
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * Set the power.no_callbacks flag, which tells the PM core that this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * device is power-managed through its parent and has no runtime PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) * callbacks of its own. The runtime sysfs attributes will be removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) void pm_runtime_no_callbacks(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) spin_lock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) dev->power.no_callbacks = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (device_is_registered(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) rpm_sysfs_remove(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) * @dev: Device to handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) * Set the power.irq_safe flag, which tells the PM core that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) * always be invoked with the spinlock held and interrupts disabled. It also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) * causes the parent's usage counter to be permanently incremented, preventing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) * the parent from runtime suspending -- otherwise an irq-safe child might have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) * to wait for a non-irq-safe parent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) void pm_runtime_irq_safe(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (dev->parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) pm_runtime_get_sync(dev->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) spin_lock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) dev->power.irq_safe = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
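
/*
 * Usage sketch: a driver whose suspend/resume callbacks only poke registers
 * can declare them irq-safe at probe time and then use the synchronous
 * helpers from atomic context:
 *
 *      pm_runtime_irq_safe(dev);       // in probe
 *
 *      pm_runtime_get_sync(dev);       // now legal in an interrupt handler
 *
 * The cost, as noted above, is that the parent can never runtime-suspend.
 */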
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it. Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {
		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		} else {
			trace_rpm_usage_rcuidle(dev, 0);
		}
	} else {
		/* Runtime suspend should be allowed now. */

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

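/*
 * Summary of the transitions handled by update_autosuspend() (editorial aid,
 * not from the original source), where "blocked" means use_autosuspend is set
 * with a negative delay:
 *
 *	old setting	new setting	action taken
 *	-----------	-----------	--------------------------------------
 *	allowed		blocked		usage_count++, rpm_resume()
 *	blocked		blocked		trace the usage count only
 *	blocked		allowed		usage_count--, then rpm_idle(RPM_AUTO)
 *	allowed		allowed		rpm_idle(RPM_AUTO)
 */
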
/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value. If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

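/*
 * Illustrative usage (editorial sketch, not from the original source): the
 * delay may be adjusted at any time, and a negative value disables runtime
 * suspend altogether while use_autosuspend is set:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	// suspend 2 s after last use
 *	pm_runtime_set_autosuspend_delay(dev, -1);	// block runtime suspend
 */
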
/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

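/*
 * Illustrative usage (editorial sketch, not from the original source): the
 * canonical autosuspend pattern in a driver pairs this helper, via the
 * pm_runtime_use_autosuspend() wrapper, with a delay and with the
 * _autosuspend variants of the put operations:
 *
 *	// in probe: idle out 500 ms after the last put
 *	pm_runtime_set_autosuspend_delay(dev, 500);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 *	// after each I/O burst completes
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */
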
/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.needs_force_resume = 0;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	dev->power.suspend_timer.function = pm_suspend_timer_fn;

	init_waitqueue_head(&dev->power.wait_queue);
}

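/*
 * Editorial note (not from the original source): disable_depth starts out as
 * 1 above, so a freshly initialized device has runtime PM disabled. Bus or
 * driver code is expected to bring it up explicitly, for example:
 *
 *	pm_runtime_set_active(dev);	// if the hardware is already powered
 *	pm_runtime_enable(dev);		// balances the initial disable_depth
 */
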
/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->flags & DL_FLAG_PM_RUNTIME) {
			link->supplier_preactivated = true;
			pm_runtime_get_sync(link->supplier);
			refcount_inc(&link->rpm_active);
		}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	unsigned long flags;
	bool put;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->supplier_preactivated) {
			link->supplier_preactivated = false;
			spin_lock_irqsave(&dev->power.lock, flags);
			put = pm_runtime_status_suspended(dev) &&
			      refcount_dec_not_one(&link->rpm_active);
			spin_unlock_irqrestore(&dev->power.lock, flags);
			if (put)
				pm_runtime_put(link->supplier);
		}

	device_links_read_unlock(idx);
}

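/*
 * Illustrative context (editorial sketch, not from the original source): the
 * two helpers above act on device links created with the DL_FLAG_PM_RUNTIME
 * flag, as a consumer driver might set up:
 *
 *	struct device_link *link;
 *
 *	link = device_link_add(consumer_dev, supplier_dev,
 *			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
 *	if (!link)
 *		return -ENODEV;
 */
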
void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

static void pm_runtime_drop_link_count(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_runtime_drop_link - Prepare for device link removal.
 * @link: Device link going away.
 *
 * Drop the link count of the consumer end of @link and decrement the supplier
 * device's runtime PM usage counter as many times as needed to drop all of the
 * runtime PM references to it held on behalf of the consumer.
 */
void pm_runtime_drop_link(struct device_link *link)
{
	if (!(link->flags & DL_FLAG_PM_RUNTIME))
		return;

	pm_runtime_drop_link_count(link->consumer);
	pm_runtime_release_supplier(link, true);
}

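/*
 * A device need not be resumed by pm_runtime_force_resume() if it holds at
 * most one usage reference and has no active children that count (i.e. no
 * children at all, or ignore_children is set).
 */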
static bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we can safely check the device's runtime PM status
 * and, if it is active, invoke its ->runtime_suspend callback to suspend it
 * and change its runtime PM status field to RPM_SUSPENDED. Also, if the
 * device's usage and children counters don't indicate that the device was in
 * use before the system-wide transition under way, decrement its parent's
 * children counter (if there is a parent). Keep runtime PM disabled to
 * preserve the state unless we encounter errors.
 *
 * Typically, this function may be invoked from a system suspend callback to
 * make sure the device is put into a low-power state; it should only be used
 * during system-wide PM transitions to sleep states. It assumes that the
 * analogous pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function is called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev)) {
		pm_runtime_set_suspended(dev);
	} else {
		__update_runtime_status(dev, RPM_SUSPENDED);
		dev->power.needs_force_resume = 1;
	}

	return 0;

err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function, the caller is expected to have brought the
 * device into a low-power state by a call to pm_runtime_force_suspend(). Here
 * we reverse those actions and bring the device back to full power, if it is
 * expected to be used on system resume. Otherwise, we defer the resume to be
 * managed via runtime PM.
 *
 * Typically, this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
		goto out;

	/*
	 * The value of the parent's children counter is correct already, so
	 * just update the status of the device.
	 */
	__update_runtime_status(dev, RPM_ACTIVE);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
out:
	dev->power.needs_force_resume = 0;
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
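
/*
 * Illustrative usage (editorial sketch, not from the original source): a
 * driver that relies on this pair of helpers, with hypothetical
 * foo_runtime_suspend() and foo_runtime_resume() callbacks, can reuse them
 * directly as its system sleep callbacks:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume, NULL)
 *	};
 */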