Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * drivers/base/power/main.c - Where the driver meets power management.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (c) 2003 Patrick Mochel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Copyright (c) 2003 Open Source Development Lab
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * The driver model core calls device_pm_add() when a device is registered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * This will initialize the embedded device_pm_info object in the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  * and add it to the list of power-controlled devices. sysfs entries for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * controlling device power management will also be added.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * A separate list is used for keeping track of power info, because the power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  * domain dependencies may differ from the ancestral dependencies that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  * subsystem list maintains.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #define pr_fmt(fmt) "PM: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/pm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/pm-trace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/pm_wakeirq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/sched/debug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/async.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/suspend.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <trace/events/power.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <linux/cpufreq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <linux/cpuidle.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <linux/devfreq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include <linux/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include <linux/wakeup_reason.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include "../base.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #include "power.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) typedef int (*pm_callback_t)(struct device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #define list_for_each_entry_rcu_locked(pos, head, member) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 	list_for_each_entry_rcu(pos, head, member, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 			device_links_read_lock_held())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49)  * The entries in the dpm_list list are in a depth first order, simply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50)  * because children are guaranteed to be discovered after parents, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51)  * are inserted at the back of the list on discovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53)  * Since device_pm_add() may be called with a device lock held,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54)  * we must never try to acquire a device lock while holding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55)  * dpm_list_mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 
/* Master list of all PM-managed devices, in depth-first discovery order. */
LIST_HEAD(dpm_list);
/* Devices migrate across these lists as they pass each suspend phase. */
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

/* System-wide suspend statistics (type declared in linux/suspend.h). */
struct suspend_stats suspend_stats;
/* Protects dpm_list and the phase lists above; see lock-ordering note above. */
static DEFINE_MUTEX(dpm_list_mtx);
/* The pm_message_t for the transition under way (set by the dpm_* phases). */
static pm_message_t pm_transition;

/* NOTE(review): presumably records the first error from an async PM callback
 * so the phase loop can abort — confirm against the dpm_* functions below. */
static int async_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) static const char *pm_verb(int event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	case PM_EVENT_SUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 		return "suspend";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 	case PM_EVENT_RESUME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 		return "resume";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 	case PM_EVENT_FREEZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 		return "freeze";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 	case PM_EVENT_QUIESCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 		return "quiesce";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 	case PM_EVENT_HIBERNATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 		return "hibernate";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 	case PM_EVENT_THAW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 		return "thaw";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	case PM_EVENT_RESTORE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 		return "restore";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 	case PM_EVENT_RECOVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 		return "recover";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 		return "(unknown PM event)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95)  * device_pm_sleep_init - Initialize system suspend-related device fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96)  * @dev: Device object being initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) void device_pm_sleep_init(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 	dev->power.is_prepared = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 	dev->power.is_suspended = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	dev->power.is_noirq_suspended = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 	dev->power.is_late_suspended = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 	init_completion(&dev->power.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	complete_all(&dev->power.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	dev->power.wakeup = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	INIT_LIST_HEAD(&dev->power.entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 *
 * Serializes access to dpm_list; see the lock-ordering comment near the
 * top of this file (never take a device lock while holding dpm_list_mtx).
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 
/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 *
 * Counterpart of device_pm_lock().
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 *
 * No-op for devices flagged as not requiring PM.  Appending at the tail
 * keeps dpm_list in depth-first order: children always follow parents.
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	/* Registering a child under a prepared (suspending) parent is suspect. */
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 *
 * Mirrors device_pm_add(): no-op for devices not requiring PM.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	/* Release anyone blocked in dpm_wait() on this device first. */
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	/* Wakeup/runtime-PM teardown happens outside dpm_list_mtx. */
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 *
 * NOTE(review): callers presumably hold dpm_list_mtx while reordering the
 * list — confirm at the call sites.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 
/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 *
 * NOTE(review): callers presumably hold dpm_list_mtx while reordering the
 * list — confirm at the call sites.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 
/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 *
 * The tail position makes @dev the last to suspend / first to resume among
 * the devices currently on dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) static ktime_t initcall_debug_start(struct device *dev, void *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	if (!pm_print_times_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 		 task_pid_nr(current),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 		 dev->parent ? dev_name(dev->parent) : "none");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 	return ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 
/*
 * initcall_debug_report - Log the result and duration of a PM callback.
 * Counterpart of initcall_debug_start(); @calltime is the value it returned.
 */
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;
	s64 nsecs;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	/* ">> 10" cheaply approximates ns -> us (divides by 1024, not 1000). */
	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)nsecs >> 10);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236)  * dpm_wait - Wait for a PM operation to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237)  * @dev: Device to wait for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238)  * @async: If unset, wait only if the device's power.async_suspend flag is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) static void dpm_wait(struct device *dev, bool async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 	if (async || (pm_async_enabled && dev->power.async_suspend))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 		wait_for_completion(&dev->power.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) static int dpm_wait_fn(struct device *dev, void *async_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	dpm_wait(dev, *((bool *)async_ptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) static void dpm_wait_for_children(struct device *dev, bool async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257)        device_for_each_child(dev, &async, dpm_wait_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 
/*
 * dpm_wait_for_suppliers - Wait for every supplier of @dev (via device links,
 * skipping dormant links) to complete its in-flight PM operation.
 */
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 
/*
 * dpm_wait_for_superior - Wait for the parent and the suppliers of @dev.
 *
 * Returns false if @dev was removed from dpm_list in the meantime, in which
 * case the caller must not attempt to resume it.
 */
static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	/* Pin the parent; get_device(NULL) is fine and returns NULL. */
	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 
/*
 * dpm_wait_for_consumers - Wait for every consumer of @dev (via device links,
 * skipping dormant links) to complete its in-flight PM operation.
 */
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 
/*
 * dpm_wait_for_subordinate - Wait for everything that depends on @dev:
 * its children and the consumers of its device links.
 */
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Return: the matching callback pointer, or NULL if @ops has none for the
 * event or the event is not recognized (or compiled out).
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	/* FREEZE and QUIESCE share the freeze callback. */
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	/* THAW and RECOVER share the thaw callback. */
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 *
 * Return: the matching *_late / *_early callback, or NULL if @ops has none
 * for the event or the event is not recognized (or compiled out).
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 
/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 *
 * Return: the matching *_noirq callback, or NULL if @ops has none for the
 * event or the event is not recognized (or compiled out).
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		", may wakeup" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 			int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	pr_err("Device %s failed to %s%s: error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	       dev_name(dev), pm_verb(state.event), info, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 			  const char *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	ktime_t calltime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	u64 usecs64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	int usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	calltime = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	do_div(usecs64, NSEC_PER_USEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	usecs = usecs64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	if (usecs == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 		usecs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 		  info ?: "", info ? " " : "", pm_verb(state.event),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		  error ? "aborted" : "complete",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) static int dpm_run_callback(pm_callback_t cb, struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 			    pm_message_t state, const char *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	ktime_t calltime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	if (!cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	calltime = initcall_debug_start(dev, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	pm_dev_dbg(dev, state, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	trace_device_pm_callback_start(dev, info, state.event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	error = cb(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	trace_device_pm_callback_end(dev, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	suspend_report_result(cb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	initcall_debug_report(dev, calltime, cb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) #ifdef CONFIG_DPM_WATCHDOG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) struct dpm_watchdog {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	struct device		*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	struct task_struct	*tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	struct timer_list	timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	struct dpm_watchdog wd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510)  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511)  * @t: The timer that PM watchdog depends on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513)  * Called when a driver has timed out suspending or resuming.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514)  * There's not much we can do here to recover so panic() to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515)  * capture a crash-dump in pstore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) static void dpm_watchdog_handler(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	struct dpm_watchdog *wd = from_timer(wd, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	show_stack(wd->tsk, NULL, KERN_EMERG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	panic("%s %s: unrecoverable failure\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		dev_driver_string(wd->dev), dev_name(wd->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528)  * dpm_watchdog_set - Enable pm watchdog for given device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529)  * @wd: Watchdog. Must be allocated on the stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530)  * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	struct timer_list *timer = &wd->timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	wd->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	wd->tsk = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	/* use same timeout value for both suspend and resume */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	add_timer(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546)  * dpm_watchdog_clear - Disable suspend/resume watchdog.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547)  * @wd: Watchdog to disable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) static void dpm_watchdog_clear(struct dpm_watchdog *wd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	struct timer_list *timer = &wd->timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	del_timer_sync(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	destroy_timer_on_stack(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) #define dpm_watchdog_set(x, y)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) #define dpm_watchdog_clear(x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) /*------------------------- Resume routines -------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565)  * dev_pm_skip_resume - System-wide device resume optimization check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566)  * @dev: Target device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569)  * - %false if the transition under way is RESTORE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570)  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571)  * - The logical negation of %power.must_resume otherwise (that is, when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572)  *   transition under way is RESUME).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) bool dev_pm_skip_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	if (pm_transition.event == PM_EVENT_RESTORE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	if (pm_transition.event == PM_EVENT_THAW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 		return dev_pm_skip_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	return !dev->power.must_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586)  * device_resume_noirq - Execute a "noirq resume" callback for given device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587)  * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589)  * @async: If true, the device is being resumed asynchronously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591)  * The driver of @dev will not receive interrupts while this function is being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592)  * executed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	pm_callback_t callback = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	const char *info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	bool skip_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	TRACE_DEVICE(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	TRACE_RESUME(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	if (dev->power.syscore || dev->power.direct_complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		goto Out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	if (!dev->power.is_noirq_suspended)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 		goto Out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	if (!dpm_wait_for_superior(dev, async))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 		goto Out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	skip_resume = dev_pm_skip_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	 * If the driver callback is skipped below or by the middle layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	 * callback and device_resume_early() also skips the driver callback for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	 * this device later, it needs to appear as "suspended" to PM-runtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	 * so change its status accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	 * Otherwise, the device is going to be resumed, so set its PM-runtime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	 * to avoid confusing drivers that don't use it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	if (skip_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 		pm_runtime_set_suspended(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	else if (dev_pm_skip_suspend(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		pm_runtime_set_active(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	if (dev->pm_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		info = "noirq power domain ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	} else if (dev->type && dev->type->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		info = "noirq type ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		callback = pm_noirq_op(dev->type->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	} else if (dev->class && dev->class->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		info = "noirq class ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 		callback = pm_noirq_op(dev->class->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	} else if (dev->bus && dev->bus->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 		info = "noirq bus ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		callback = pm_noirq_op(dev->bus->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	if (callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 		goto Run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	if (skip_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		goto Skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	if (dev->driver && dev->driver->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		info = "noirq driver ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		callback = pm_noirq_op(dev->driver->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) Run:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	error = dpm_run_callback(callback, dev, state, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) Skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	dev->power.is_noirq_suspended = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) Out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	complete_all(&dev->power.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	TRACE_RESUME(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) static bool is_async(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	return dev->power.async_suspend && pm_async_enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		&& !pm_trace_is_enabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) static bool dpm_async_fn(struct device *dev, async_func_t func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	reinit_completion(&dev->power.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	if (is_async(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		get_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 		async_schedule_dev(func, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) static void async_resume_noirq(void *data, async_cookie_t cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	struct device *dev = (struct device *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	error = device_resume_noirq(dev, pm_transition, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		pm_dev_err(dev, pm_transition, " async", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) static void dpm_noirq_resume_devices(pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	ktime_t starttime = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	mutex_lock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	pm_transition = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	 * Advanced the async threads upfront,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	 * in case the starting of async threads is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	 * delayed by non-async resuming devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		dpm_async_fn(dev, async_resume_noirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	while (!list_empty(&dpm_noirq_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		dev = to_device(dpm_noirq_list.next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 		get_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		mutex_unlock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		if (!is_async(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 			int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 			error = device_resume_noirq(dev, state, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 			if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 				suspend_stats.failed_resume_noirq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 				dpm_save_failed_dev(dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 				pm_dev_err(dev, state, " noirq", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		mutex_lock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	mutex_unlock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	async_synchronize_full();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	dpm_show_time(starttime, state, 0, "noirq");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741)  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744)  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745)  * allow device drivers' interrupt handlers to be called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) void dpm_resume_noirq(pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	dpm_noirq_resume_devices(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	resume_device_irqs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	device_wakeup_disarm_wake_irqs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	cpuidle_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758)  * device_resume_early - Execute an "early resume" callback for given device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759)  * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761)  * @async: If true, the device is being resumed asynchronously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763)  * Runtime PM is disabled for @dev while this function is being executed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) static int device_resume_early(struct device *dev, pm_message_t state, bool async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	pm_callback_t callback = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	const char *info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	TRACE_DEVICE(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	TRACE_RESUME(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	if (dev->power.syscore || dev->power.direct_complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		goto Out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	if (!dev->power.is_late_suspended)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		goto Out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	if (!dpm_wait_for_superior(dev, async))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		goto Out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	if (dev->pm_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		info = "early power domain ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	} else if (dev->type && dev->type->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		info = "early type ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		callback = pm_late_early_op(dev->type->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	} else if (dev->class && dev->class->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		info = "early class ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		callback = pm_late_early_op(dev->class->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	} else if (dev->bus && dev->bus->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		info = "early bus ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		callback = pm_late_early_op(dev->bus->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	if (callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		goto Run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	if (dev_pm_skip_resume(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		goto Skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	if (dev->driver && dev->driver->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		info = "early driver ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		callback = pm_late_early_op(dev->driver->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) Run:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	error = dpm_run_callback(callback, dev, state, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) Skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	dev->power.is_late_suspended = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) Out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	TRACE_RESUME(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	complete_all(&dev->power.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) static void async_resume_early(void *data, async_cookie_t cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	struct device *dev = (struct device *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	error = device_resume_early(dev, pm_transition, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		pm_dev_err(dev, pm_transition, " async", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834)  * dpm_resume_early - Execute "early resume" callbacks for all devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) void dpm_resume_early(pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	ktime_t starttime = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	mutex_lock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	pm_transition = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	 * Advanced the async threads upfront,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	 * in case the starting of async threads is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	 * delayed by non-async resuming devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		dpm_async_fn(dev, async_resume_early);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	while (!list_empty(&dpm_late_early_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		dev = to_device(dpm_late_early_list.next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		get_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		mutex_unlock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		if (!is_async(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 			int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 			error = device_resume_early(dev, state, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 			if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 				suspend_stats.failed_resume_early++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 				dpm_save_failed_dev(dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 				pm_dev_err(dev, state, " early", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		mutex_lock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	mutex_unlock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	async_synchronize_full();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	dpm_show_time(starttime, state, 0, "early");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881)  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) void dpm_resume_start(pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	dpm_resume_noirq(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	dpm_resume_early(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) EXPORT_SYMBOL_GPL(dpm_resume_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892)  * device_resume - Execute "resume" callbacks for given device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893)  * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895)  * @async: If true, the device is being resumed asynchronously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) static int device_resume(struct device *dev, pm_message_t state, bool async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	pm_callback_t callback = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	const char *info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	TRACE_DEVICE(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	TRACE_RESUME(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	if (dev->power.syscore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		goto Complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	if (dev->power.direct_complete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		/* Match the pm_runtime_disable() in __device_suspend(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		goto Complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	if (!dpm_wait_for_superior(dev, async))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		goto Complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	dpm_watchdog_set(&wd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	device_lock(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	 * This is a fib.  But we'll allow new children to be added below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	 * a resumed device, even if the device hasn't been completed yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	dev->power.is_prepared = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	if (!dev->power.is_suspended)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		goto Unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	if (dev->pm_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		info = "power domain ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		callback = pm_op(&dev->pm_domain->ops, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		goto Driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	if (dev->type && dev->type->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		info = "type ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		callback = pm_op(dev->type->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		goto Driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	if (dev->class && dev->class->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		info = "class ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		callback = pm_op(dev->class->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		goto Driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	if (dev->bus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		if (dev->bus->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 			info = "bus ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			callback = pm_op(dev->bus->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		} else if (dev->bus->resume) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 			info = "legacy bus ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 			callback = dev->bus->resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			goto End;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960)  Driver:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	if (!callback && dev->driver && dev->driver->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		info = "driver ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		callback = pm_op(dev->driver->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966)  End:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	error = dpm_run_callback(callback, dev, state, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	dev->power.is_suspended = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970)  Unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	device_unlock(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	dpm_watchdog_clear(&wd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974)  Complete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	complete_all(&dev->power.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	TRACE_RESUME(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) static void async_resume(void *data, async_cookie_t cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	struct device *dev = (struct device *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	error = device_resume(dev, pm_transition, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		pm_dev_err(dev, pm_transition, " async", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994)  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997)  * Execute the appropriate "resume" callback for all devices whose status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998)  * indicates that they are suspended.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) void dpm_resume(pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	ktime_t starttime = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	mutex_lock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	pm_transition = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	async_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		dpm_async_fn(dev, async_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	while (!list_empty(&dpm_suspended_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		dev = to_device(dpm_suspended_list.next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		get_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		if (!is_async(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			mutex_unlock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			error = device_resume(dev, state, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 			if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 				suspend_stats.failed_resume++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 				dpm_save_failed_step(SUSPEND_RESUME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 				dpm_save_failed_dev(dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 				pm_dev_err(dev, state, "", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			mutex_lock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		if (!list_empty(&dev->power.entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	mutex_unlock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	async_synchronize_full();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	dpm_show_time(starttime, state, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	cpufreq_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	devfreq_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)  * device_complete - Complete a PM transition for given device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)  * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static void device_complete(struct device *dev, pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	void (*callback)(struct device *) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	const char *info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	if (dev->power.syscore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	device_lock(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	if (dev->pm_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		info = "completing power domain ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		callback = dev->pm_domain->ops.complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	} else if (dev->type && dev->type->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		info = "completing type ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		callback = dev->type->pm->complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	} else if (dev->class && dev->class->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		info = "completing class ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		callback = dev->class->pm->complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	} else if (dev->bus && dev->bus->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		info = "completing bus ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		callback = dev->bus->pm->complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	if (!callback && dev->driver && dev->driver->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		info = "completing driver ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		callback = dev->driver->pm->complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	if (callback) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		pm_dev_dbg(dev, state, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		callback(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	device_unlock(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	pm_runtime_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)  * dpm_complete - Complete a PM transition for all non-sysdev devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)  * Execute the ->complete() callbacks for all devices whose PM status is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)  * DPM_ON (this allows new devices to be registered).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) void dpm_complete(pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	INIT_LIST_HEAD(&list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	mutex_lock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	while (!list_empty(&dpm_prepared_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		struct device *dev = to_device(dpm_prepared_list.prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		get_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		dev->power.is_prepared = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		list_move(&dev->power.entry, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		mutex_unlock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		trace_device_pm_callback_start(dev, "", state.event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		device_complete(dev, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		trace_device_pm_callback_end(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		mutex_lock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	list_splice(&list, &dpm_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	mutex_unlock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	/* Allow device probing and trigger re-probing of deferred devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	device_unblock_probing();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)  * Execute "resume" callbacks for all devices and complete the PM transition of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)  * the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) void dpm_resume_end(pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	dpm_resume(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	dpm_complete(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) EXPORT_SYMBOL_GPL(dpm_resume_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) /*------------------------- Suspend routines -------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)  * resume_event - Return a "resume" message for given "suspend" sleep state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)  * @sleep_state: PM message representing a sleep state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)  * Return a PM message representing the resume event corresponding to given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)  * sleep state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) static pm_message_t resume_event(pm_message_t sleep_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	switch (sleep_state.event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	case PM_EVENT_SUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		return PMSG_RESUME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	case PM_EVENT_FREEZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	case PM_EVENT_QUIESCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		return PMSG_RECOVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	case PM_EVENT_HIBERNATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		return PMSG_RESTORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	return PMSG_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) static void dpm_superior_set_must_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	struct device_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	if (dev->parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		dev->parent->power.must_resume = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	idx = device_links_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		link->supplier->power.must_resume = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	device_links_read_unlock(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)  * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)  * @async: If true, the device is being suspended asynchronously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)  * The driver of @dev will not receive interrupts while this function is being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)  * executed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	pm_callback_t callback = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	const char *info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	TRACE_DEVICE(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	TRACE_SUSPEND(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	dpm_wait_for_subordinate(dev, async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	if (async_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		goto Complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	if (dev->power.syscore || dev->power.direct_complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		goto Complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	if (dev->pm_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		info = "noirq power domain ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	} else if (dev->type && dev->type->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		info = "noirq type ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		callback = pm_noirq_op(dev->type->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	} else if (dev->class && dev->class->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		info = "noirq class ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		callback = pm_noirq_op(dev->class->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	} else if (dev->bus && dev->bus->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		info = "noirq bus ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		callback = pm_noirq_op(dev->bus->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	if (callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		goto Run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	if (dev_pm_skip_suspend(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		goto Skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	if (dev->driver && dev->driver->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		info = "noirq driver ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		callback = pm_noirq_op(dev->driver->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) Run:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	error = dpm_run_callback(callback, dev, state, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		async_error = error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		log_suspend_abort_reason("Device %s failed to %s noirq: error %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 					 dev_name(dev), pm_verb(state.event), error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		goto Complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) Skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	dev->power.is_noirq_suspended = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	 * Skipping the resume of devices that were in use right before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	 * system suspend (as indicated by their PM-runtime usage counters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	 * would be suboptimal.  Also resume them if doing that is not allowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	 * to be skipped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	if (atomic_read(&dev->power.usage_count) > 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	      dev->power.may_skip_resume))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		dev->power.must_resume = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	if (dev->power.must_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		dpm_superior_set_must_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) Complete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	complete_all(&dev->power.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	TRACE_SUSPEND(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) static void async_suspend_noirq(void *data, async_cookie_t cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	struct device *dev = (struct device *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	error = __device_suspend_noirq(dev, pm_transition, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		dpm_save_failed_dev(dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		pm_dev_err(dev, pm_transition, " async", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static int device_suspend_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	if (dpm_async_fn(dev, async_suspend_noirq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	return __device_suspend_noirq(dev, pm_transition, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) static int dpm_noirq_suspend_devices(pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	ktime_t starttime = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	mutex_lock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	pm_transition = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	async_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	while (!list_empty(&dpm_late_early_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		struct device *dev = to_device(dpm_late_early_list.prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		get_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		mutex_unlock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		error = device_suspend_noirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		mutex_lock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 			pm_dev_err(dev, state, " noirq", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 			dpm_save_failed_dev(dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 			put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		if (!list_empty(&dev->power.entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 			list_move(&dev->power.entry, &dpm_noirq_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		if (async_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	mutex_unlock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	async_synchronize_full();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		error = async_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		suspend_stats.failed_suspend_noirq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	dpm_show_time(starttime, state, error, "noirq");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)  * Prevent device drivers' interrupt handlers from being called and invoke
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)  * "noirq" suspend callbacks for all non-sysdev devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) int dpm_suspend_noirq(pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	cpuidle_pause();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	device_wakeup_arm_wake_irqs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	suspend_device_irqs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	ret = dpm_noirq_suspend_devices(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		dpm_resume_noirq(resume_event(state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) static void dpm_propagate_wakeup_to_parent(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	struct device *parent = dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	if (!parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	spin_lock_irq(&parent->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	if (dev->power.wakeup_path && !parent->power.ignore_children)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		parent->power.wakeup_path = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	spin_unlock_irq(&parent->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)  * __device_suspend_late - Execute a "late suspend" callback for given device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)  * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)  * @async: If true, the device is being suspended asynchronously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)  * Runtime PM is disabled for @dev while this function is being executed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	pm_callback_t callback = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	const char *info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	TRACE_DEVICE(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	TRACE_SUSPEND(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	__pm_runtime_disable(dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	dpm_wait_for_subordinate(dev, async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	if (async_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		goto Complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	if (pm_wakeup_pending()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		async_error = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		goto Complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	if (dev->power.syscore || dev->power.direct_complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		goto Complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	if (dev->pm_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		info = "late power domain ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	} else if (dev->type && dev->type->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		info = "late type ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		callback = pm_late_early_op(dev->type->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	} else if (dev->class && dev->class->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		info = "late class ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		callback = pm_late_early_op(dev->class->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	} else if (dev->bus && dev->bus->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		info = "late bus ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		callback = pm_late_early_op(dev->bus->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	if (callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		goto Run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	if (dev_pm_skip_suspend(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		goto Skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	if (dev->driver && dev->driver->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		info = "late driver ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		callback = pm_late_early_op(dev->driver->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) Run:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	error = dpm_run_callback(callback, dev, state, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		async_error = error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		log_suspend_abort_reason("Device %s failed to %s late: error %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 					 dev_name(dev), pm_verb(state.event), error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		goto Complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	dpm_propagate_wakeup_to_parent(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) Skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	dev->power.is_late_suspended = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) Complete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	TRACE_SUSPEND(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	complete_all(&dev->power.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) static void async_suspend_late(void *data, async_cookie_t cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	struct device *dev = (struct device *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	error = __device_suspend_late(dev, pm_transition, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		dpm_save_failed_dev(dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		pm_dev_err(dev, pm_transition, " async", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) static int device_suspend_late(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	if (dpm_async_fn(dev, async_suspend_late))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	return __device_suspend_late(dev, pm_transition, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) int dpm_suspend_late(pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	ktime_t starttime = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	mutex_lock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	pm_transition = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	async_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	while (!list_empty(&dpm_suspended_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		struct device *dev = to_device(dpm_suspended_list.prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		get_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		mutex_unlock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		error = device_suspend_late(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		mutex_lock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		if (!list_empty(&dev->power.entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 			list_move(&dev->power.entry, &dpm_late_early_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 			pm_dev_err(dev, state, " late", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 			dpm_save_failed_dev(dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 			put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		if (async_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	mutex_unlock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	async_synchronize_full();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		error = async_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		suspend_stats.failed_suspend_late++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		dpm_resume_early(resume_event(state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	dpm_show_time(starttime, state, error, "late");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) int dpm_suspend_end(pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	ktime_t starttime = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	error = dpm_suspend_late(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	error = dpm_suspend_noirq(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		dpm_resume_early(resume_event(state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	dpm_show_time(starttime, state, error, "end");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) EXPORT_SYMBOL_GPL(dpm_suspend_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)  * @dev: Device to suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)  * @cb: Suspend callback to execute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)  * @info: string description of caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) static int legacy_suspend(struct device *dev, pm_message_t state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 			  int (*cb)(struct device *dev, pm_message_t state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 			  const char *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	ktime_t calltime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	calltime = initcall_debug_start(dev, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	trace_device_pm_callback_start(dev, info, state.event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	error = cb(dev, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	trace_device_pm_callback_end(dev, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	suspend_report_result(cb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	initcall_debug_report(dev, calltime, cb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) static void dpm_clear_superiors_direct_complete(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	struct device_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	if (dev->parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		spin_lock_irq(&dev->parent->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		dev->parent->power.direct_complete = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		spin_unlock_irq(&dev->parent->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	idx = device_links_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		spin_lock_irq(&link->supplier->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		link->supplier->power.direct_complete = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		spin_unlock_irq(&link->supplier->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	device_links_read_unlock(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)  * __device_suspend - Execute "suspend" callbacks for given device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)  * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)  * @async: If true, the device is being suspended asynchronously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) static int __device_suspend(struct device *dev, pm_message_t state, bool async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	pm_callback_t callback = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	const char *info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	TRACE_DEVICE(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	TRACE_SUSPEND(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	dpm_wait_for_subordinate(dev, async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	if (async_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		dev->power.direct_complete = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		goto Complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	 * Wait for possible runtime PM transitions of the device in progress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	 * to complete and if there's a runtime resume request pending for it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	 * resume it before proceeding with invoking the system-wide suspend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	 * callbacks for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	 * If the system-wide suspend callbacks below change the configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	 * of the device, they must disable runtime PM for it or otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	 * ensure that its runtime-resume callbacks will not be confused by that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	 * change in case they are invoked going forward.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	pm_runtime_barrier(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	if (pm_wakeup_pending()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		dev->power.direct_complete = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		async_error = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		goto Complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	if (dev->power.syscore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		goto Complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	/* Avoid direct_complete to let wakeup_path propagate. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	if (device_may_wakeup(dev) || dev->power.wakeup_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		dev->power.direct_complete = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	if (dev->power.direct_complete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		if (pm_runtime_status_suspended(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 			pm_runtime_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 			if (pm_runtime_status_suspended(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 				pm_dev_dbg(dev, state, "direct-complete ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 				goto Complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 			pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		dev->power.direct_complete = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	dev->power.may_skip_resume = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	dpm_watchdog_set(&wd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	device_lock(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	if (dev->pm_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		info = "power domain ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		callback = pm_op(&dev->pm_domain->ops, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		goto Run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	if (dev->type && dev->type->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		info = "type ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		callback = pm_op(dev->type->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		goto Run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	if (dev->class && dev->class->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		info = "class ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		callback = pm_op(dev->class->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		goto Run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	if (dev->bus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		if (dev->bus->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 			info = "bus ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 			callback = pm_op(dev->bus->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		} else if (dev->bus->suspend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 			pm_dev_dbg(dev, state, "legacy bus ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 			error = legacy_suspend(dev, state, dev->bus->suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 						"legacy bus ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 			goto End;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)  Run:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	if (!callback && dev->driver && dev->driver->pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		info = "driver ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		callback = pm_op(dev->driver->pm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	error = dpm_run_callback(callback, dev, state, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)  End:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		dev->power.is_suspended = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		if (device_may_wakeup(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 			dev->power.wakeup_path = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		dpm_propagate_wakeup_to_parent(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		dpm_clear_superiors_direct_complete(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		log_suspend_abort_reason("Device %s failed to %s: error %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 					 dev_name(dev), pm_verb(state.event), error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	device_unlock(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	dpm_watchdog_clear(&wd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)  Complete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		async_error = error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	complete_all(&dev->power.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	TRACE_SUSPEND(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) static void async_suspend(void *data, async_cookie_t cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	struct device *dev = (struct device *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	error = __device_suspend(dev, pm_transition, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		dpm_save_failed_dev(dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 		pm_dev_err(dev, pm_transition, " async", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) static int device_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	if (dpm_async_fn(dev, async_suspend))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	return __device_suspend(dev, pm_transition, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) int dpm_suspend(pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	ktime_t starttime = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	devfreq_suspend();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	cpufreq_suspend();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	mutex_lock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	pm_transition = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	async_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	while (!list_empty(&dpm_prepared_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		struct device *dev = to_device(dpm_prepared_list.prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		get_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		mutex_unlock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		error = device_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		mutex_lock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 			pm_dev_err(dev, state, "", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 			dpm_save_failed_dev(dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 			put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		if (!list_empty(&dev->power.entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 			list_move(&dev->power.entry, &dpm_suspended_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		if (async_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	mutex_unlock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	async_synchronize_full();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 		error = async_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		suspend_stats.failed_suspend++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		dpm_save_failed_step(SUSPEND_SUSPEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	dpm_show_time(starttime, state, error, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)  * device_prepare - Prepare a device for system power transition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)  * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)  * Execute the ->prepare() callback(s) for given device.  No new children of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)  * device may be registered after this function has returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) static int device_prepare(struct device *dev, pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	int (*callback)(struct device *) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	 * If a device's parent goes into runtime suspend at the wrong time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	 * it won't be possible to resume the device.  To prevent this we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	 * block runtime suspend here, during the prepare phase, and allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	 * it again during the complete phase.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	pm_runtime_get_noresume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	if (dev->power.syscore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	device_lock(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	dev->power.wakeup_path = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	if (dev->power.no_pm_callbacks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	if (dev->pm_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		callback = dev->pm_domain->ops.prepare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	else if (dev->type && dev->type->pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		callback = dev->type->pm->prepare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	else if (dev->class && dev->class->pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		callback = dev->class->pm->prepare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	else if (dev->bus && dev->bus->pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		callback = dev->bus->pm->prepare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	if (!callback && dev->driver && dev->driver->pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		callback = dev->driver->pm->prepare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	if (callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		ret = callback(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	device_unlock(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		suspend_report_result(callback, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		pm_runtime_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	 * A positive return value from ->prepare() means "this device appears
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	 * to be runtime-suspended and its state is fine, so if it really is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	 * runtime-suspended, you can leave it in that state provided that you
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	 * will do the same thing with all of its descendants".  This only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	 * applies to suspend transitions, however.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	spin_lock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		(ret > 0 || dev->power.no_pm_callbacks) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)  * Execute the ->prepare() callback(s) for all devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) int dpm_prepare(pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	 * Give a chance for the known devices to complete their probes, before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	 * disable probing of devices. This sync point is important at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	 * at boot time + hibernation restore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	wait_for_device_probe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	 * It is unsafe if probing of devices will happen during suspend or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	 * hibernation and system behavior will be unpredictable in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	 * So, let's prohibit device's probing here and defer their probes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	 * instead. The normal behavior will be restored in dpm_complete().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	device_block_probing();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	mutex_lock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	while (!list_empty(&dpm_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		struct device *dev = to_device(dpm_list.next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		get_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		mutex_unlock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		trace_device_pm_callback_start(dev, "", state.event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		error = device_prepare(dev, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 		trace_device_pm_callback_end(dev, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		mutex_lock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 			if (error == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 				put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 				error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 			pr_info("Device %s not prepared for power transition: code %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 				dev_name(dev), error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 			log_suspend_abort_reason("Device %s not prepared for power transition: code %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 						 dev_name(dev), error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 			dpm_save_failed_dev(dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 			put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 		dev->power.is_prepared = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		if (!list_empty(&dev->power.entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	mutex_unlock(&dpm_list_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)  * @state: PM transition of the system being carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)  * callbacks for them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) int dpm_suspend_start(pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	ktime_t starttime = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	error = dpm_prepare(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 		suspend_stats.failed_prepare++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 		dpm_save_failed_step(SUSPEND_PREPARE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 		error = dpm_suspend(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	dpm_show_time(starttime, state, error, "start");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) EXPORT_SYMBOL_GPL(dpm_suspend_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) void __suspend_report_result(const char *function, void *fn, int ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		pr_err("%s(): %pS returns %d\n", function, fn, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) EXPORT_SYMBOL_GPL(__suspend_report_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)  * @subordinate: Device that needs to wait for @dev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)  * @dev: Device to wait for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	dpm_wait(dev, subordinate->power.async_suspend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	return async_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)  * dpm_for_each_dev - device iterator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)  * @data: data for the callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)  * @fn: function to be called for each device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)  * Iterate over devices in dpm_list, and call @fn for each device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)  * passing it @data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	if (!fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	device_pm_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	list_for_each_entry(dev, &dpm_list, power.entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 		fn(dev, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	device_pm_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) EXPORT_SYMBOL_GPL(dpm_for_each_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	return !ops->prepare &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	       !ops->suspend &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	       !ops->suspend_late &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	       !ops->suspend_noirq &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	       !ops->resume_noirq &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	       !ops->resume_early &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	       !ops->resume &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	       !ops->complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) void device_pm_check_callbacks(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	spin_lock_irqsave(&dev->power.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	dev->power.no_pm_callbacks =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		 !dev->bus->suspend && !dev->bus->resume)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 		 !dev->driver->suspend && !dev->driver->resume));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	spin_unlock_irqrestore(&dev->power.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) bool dev_pm_skip_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		pm_runtime_status_suspended(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) }