Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * drivers/base/power/domain.c - Common code related to device power domains.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #define pr_fmt(fmt) "PM: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/pm_opp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/pm_domain.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/pm_qos.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/pm_clock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/suspend.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include "power.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #define GENPD_RETRY_MAX_MS	250		/* Approximate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 
/*
 * GENPD_DEV_CALLBACK - Invoke an optional per-device genpd callback.
 *
 * Looks up genpd->dev_ops.callback and, when it is set, calls it for @dev.
 * Evaluates to the callback's return value, or to (type)0 when the genpd
 * does not provide that callback.
 */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d); 			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback; 			\
	if (__routine) {					\
		__ret = __routine(dev); 			\
	}							\
	__ret;							\
})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
/* Global list of registered generic PM domains, protected by gpd_list_lock. */
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 
/*
 * Per-domain locking abstraction: a genpd is protected either by a mutex
 * (sleepable domains) or by a spinlock (IRQ-safe domains, see
 * GENPD_FLAG_IRQ_SAFE); lock_ops points at the matching implementation.
 */
struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 
/* Lock a mutex-based (sleepable) genpd. */
static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 
/* Lock a mutex-based genpd with a lockdep nesting annotation of @depth. */
static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 
/*
 * Interruptibly lock a mutex-based genpd. Returns 0 on success or a
 * negative error code if the sleep was interrupted by a signal.
 */
static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 	return mutex_unlock(&genpd->mlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 
/* Lock operations for sleepable (mutex-based) domains. */
static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 
/*
 * Lock a spinlock-based (IRQ-safe) genpd. The saved IRQ flags are stashed
 * in the genpd itself; they are consumed by the matching
 * genpd_unlock_spin() before the lock is released, so the stash cannot be
 * overwritten by another acquirer in between.
 */
static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 
/*
 * Lock a spinlock-based genpd with a lockdep nesting annotation of @depth,
 * saving the IRQ flags in the genpd (see genpd_lock_spin()).
 */
static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 
/*
 * "Interruptible" lock for a spinlock-based genpd. Spinlock acquisition
 * cannot be interrupted by signals, so this always succeeds and returns 0.
 */
static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 
/* Unlock a spinlock-based genpd, restoring the IRQ flags saved at lock time. */
static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 
/* Lock operations for IRQ-safe (spinlock-based) domains. */
static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 
/*
 * Locking helpers dispatching through the domain's lock_ops. The macro
 * arguments are parenthesized so that expression arguments expand safely.
 */
#define genpd_lock(p)			((p)->lock_ops->lock(p))
#define genpd_lock_nested(p, d)		((p)->lock_ops->lock_nested((p), (d)))
#define genpd_lock_interruptible(p)	((p)->lock_ops->lock_interruptible(p))
#define genpd_unlock(p)			((p)->lock_ops->unlock(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 
/*
 * Status/flag test helpers. The @genpd argument is parenthesized so that
 * expression arguments expand safely.
 */
#define genpd_status_on(genpd)		((genpd)->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	((genpd)->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	((genpd)->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	((genpd)->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	((genpd)->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	((genpd)->flags & GENPD_FLAG_RPM_ALWAYS_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 		const struct generic_pm_domain *genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 	bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	 * Warn once if an IRQ safe device is attached to a no sleep domain, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 	 * to indicate a suboptimal configuration for PM. For an always on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	 * domain this isn't case, thus don't warn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 	if (ret && !genpd_is_always_on(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 				genpd->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) static int genpd_runtime_suspend(struct device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156)  * Get the generic PM domain for a particular struct device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157)  * This validates the struct device pointer, the PM domain pointer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158)  * and checks that the PM domain pointer is a real generic PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159)  * Any failure results in NULL being returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 	/* A genpd's always have its ->runtime_suspend() callback assigned. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 		return pd_to_genpd(dev->pm_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174)  * This should only be used where we are certain that the pm_domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175)  * attached to the device is a genpd domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) static struct generic_pm_domain *dev_to_genpd(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 	if (IS_ERR_OR_NULL(dev->pm_domain))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 	return pd_to_genpd(dev->pm_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 
/* Invoke the genpd's optional ->stop() device callback for @dev. */
static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 
/* Invoke the genpd's optional ->start() device callback for @dev. */
static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 		ret = !!atomic_dec_and_test(&genpd->sd_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 
/*
 * Increment the subdomain counter of @genpd.
 *
 * The barrier orders the increment before subsequent memory accesses;
 * presumably it pairs with readers of sd_count in the power-off path —
 * confirm against the code consuming sd_count (not visible in this chunk).
 */
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) static struct dentry *genpd_debugfs_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) static void genpd_debug_add(struct generic_pm_domain *genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) static void genpd_debug_remove(struct generic_pm_domain *genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	struct dentry *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	debugfs_remove(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) static void genpd_update_accounting(struct generic_pm_domain *genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	ktime_t delta, now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 	now = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	delta = ktime_sub(now, genpd->accounting_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	 * If genpd->status is active, it means we are just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	 * out of off and so update the idle time and vice
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	 * versa.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	if (genpd->status == GENPD_STATE_ON) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 		int state_idx = genpd->state_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 		genpd->states[state_idx].idle_time =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 			ktime_add(genpd->states[state_idx].idle_time, delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 		genpd->on_time = ktime_add(genpd->on_time, delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 	genpd->accounting_time = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) #else
/* CONFIG_DEBUG_FS disabled: the debugfs/accounting hooks compile away. */
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 					   unsigned int state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	struct generic_pm_domain_data *pd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	struct pm_domain_data *pdd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 	struct gpd_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	/* New requested state is same as Max requested state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	if (state == genpd->performance_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 		return state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 	/* New requested state is higher than Max requested state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	if (state > genpd->performance_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 		return state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	/* Traverse all devices within the domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 		pd_data = to_gpd_data(pdd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 		if (pd_data->performance_state > state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 			state = pd_data->performance_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	 * Traverse all sub-domains within the domain. This can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	 * done without any additional locking as the link->performance_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 	 * field is protected by the parent genpd->lock, which is already taken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	 * Also note that link->performance_state (subdomain's performance state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 	 * requirement to parent domain) is different from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	 * link->child->performance_state (current performance state requirement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 	 * of the devices/sub-domains of the subdomain) and so can have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	 * different value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	 * Note that we also take vote from powered-off sub-domains into account
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 	 * as the same is done for devices right now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 		if (link->performance_state > state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 			state = link->performance_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 	return state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 
/*
 * _genpd_set_performance_state - Change the performance state of @genpd.
 * @genpd: domain to update (locked by the caller)
 * @state: aggregated target performance state for @genpd
 * @depth: lock nesting depth, used to annotate the parent locks correctly
 *
 * Propagates the corresponding requirement to each parent domain first
 * (translating @state into each parent's performance-state space via the
 * OPP tables), then applies @state to @genpd itself. On any failure the
 * parent requirements recorded so far are rolled back, recursively.
 *
 * Returns 0 on success and a negative error code otherwise.
 */
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *parent;
	struct gpd_link *link;
	int parent_state, ret;

	/* Nothing to do if the aggregate state is unchanged. */
	if (state == genpd->performance_state)
		return 0;

	/* Propagate to parents of genpd */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		parent = link->parent;

		if (!parent->set_performance_state)
			continue;

		/* Find parent's performance state */
		ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
							 parent->opp_table,
							 state);
		if (unlikely(ret < 0))
			goto err;

		parent_state = ret;

		genpd_lock_nested(parent, depth + 1);

		/* Record our new vote, keeping the old one for rollback. */
		link->prev_performance_state = link->performance_state;
		link->performance_state = parent_state;
		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(parent);

		if (ret)
			goto err;
	}

	ret = genpd->set_performance_state(genpd, state);
	if (ret)
		goto err;

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, lets rollback */
	list_for_each_entry_continue_reverse(link, &genpd->child_links,
					     child_node) {
		parent = link->parent;

		if (!parent->set_performance_state)
			continue;

		genpd_lock_nested(parent, depth + 1);

		/* Restore the vote recorded before the failed update. */
		parent_state = link->prev_performance_state;
		link->performance_state = parent_state;

		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       parent->name, parent_state);
		}

		genpd_unlock(parent);
	}

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 
/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance-state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when the
 *	   device doesn't have any performance state constraints left (And so
 *	   the device wouldn't participate anymore to find the target
 *	   performance state of the genpd).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	unsigned int prev;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (unlikely(!genpd->set_performance_state))
		return -EINVAL;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	genpd_lock(genpd);

	/* Record the device's own vote, remembering the old one for rollback. */
	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	prev = gpd_data->performance_state;
	gpd_data->performance_state = state;

	/* Re-aggregate over all devices/subdomains and apply the result. */
	state = _genpd_reeval_performance_state(genpd, state);
	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev;

	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 
/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform of the next wakeup. It's assumed that the users
 * guarantee that the genpd wouldn't be detached while this routine is getting
 * called. Additionally, it's also assumed that @dev isn't runtime suspended
 * (RPM_SUSPENDED).
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
 * about that, so stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain_data *gpd_data;
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	/*
	 * NOTE(review): next_wakeup is written without holding the genpd
	 * lock; presumably the consumer tolerates a racy update — confirm
	 * against the power-off path that reads it (not in this chunk).
	 */
	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	gpd_data->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	unsigned int state_idx = genpd->state_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	ktime_t time_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	s64 elapsed_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	/* Notify consumers that we are about to power on. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 					     GENPD_NOTIFY_PRE_ON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 					     GENPD_NOTIFY_OFF, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	ret = notifier_to_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	if (!genpd->power_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	if (!timed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		ret = genpd->power_on(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	time_start = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	ret = genpd->power_on(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	genpd->max_off_time_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 		 genpd->name, "on", elapsed_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 				NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	unsigned int state_idx = genpd->state_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	ktime_t time_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	s64 elapsed_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	/* Notify consumers that we are about to power off. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 					     GENPD_NOTIFY_PRE_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 					     GENPD_NOTIFY_ON, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	ret = notifier_to_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	if (!genpd->power_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	if (!timed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		ret = genpd->power_off(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 			goto busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	time_start = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	ret = genpd->power_off(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		goto busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	genpd->max_off_time_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		 genpd->name, "off", elapsed_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 				NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) busy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554)  * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555)  * @genpd: PM domain to power off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557)  * Queue up the execution of genpd_power_off() unless it's already been done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558)  * before.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	queue_work(pm_wq, &genpd->power_off_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566)  * genpd_power_off - Remove power from a given PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567)  * @genpd: PM domain to power down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568)  * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569)  * RPM status of the releated device is in an intermediate state, not yet turned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570)  * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571)  * be RPM_SUSPENDED, while it tries to power off the PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573)  * If all of the @genpd's devices have been suspended and all of its subdomains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574)  * have been powered down, remove power from @genpd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 			   unsigned int depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	struct pm_domain_data *pdd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	struct gpd_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	unsigned int not_suspended = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	 * Do not try to power off the domain in the following situations:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	 * (1) The domain is already in the "power off" state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	 * (2) System suspend is in progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	 * Abort power off for the PM domain in the following situations:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	 * (1) The domain is configured as always on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	 * (2) When the domain has a subdomain being powered on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	if (genpd_is_always_on(genpd) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 			genpd_is_rpm_always_on(genpd) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 			atomic_read(&genpd->sd_count) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 		enum pm_qos_flags_status stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 		if (stat > PM_QOS_FLAGS_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		 * Do not allow PM domain to be powered off, when an IRQ safe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 		 * device is part of a non-IRQ safe domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		if (!pm_runtime_suspended(pdd->dev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 			not_suspended++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	if (genpd->gov && genpd->gov->power_down_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		if (!genpd->gov->power_down_ok(&genpd->domain))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 			return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	/* Default to shallowest state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	if (!genpd->gov)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		genpd->state_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	/* Don't power off, if a child domain is waiting to power on. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	if (atomic_read(&genpd->sd_count) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	ret = _genpd_power_off(genpd, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		genpd->states[genpd->state_idx].rejected++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	genpd->status = GENPD_STATE_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	genpd_update_accounting(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	genpd->states[genpd->state_idx].usage++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	list_for_each_entry(link, &genpd->child_links, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 		genpd_sd_counter_dec(link->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		genpd_lock_nested(link->parent, depth + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 		genpd_power_off(link->parent, false, depth + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		genpd_unlock(link->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655)  * genpd_power_on - Restore power to a given PM domain and its parents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656)  * @genpd: PM domain to power up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657)  * @depth: nesting count for lockdep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659)  * Restore power to @genpd and all of its parents so that it is possible to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660)  * resume a device belonging to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	struct gpd_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	if (genpd_status_on(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	 * The list is guaranteed not to change while the loop below is being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	 * executed, unless one of the parents' .power_on() callbacks fiddles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	 * with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	list_for_each_entry(link, &genpd->child_links, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		struct generic_pm_domain *parent = link->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		genpd_sd_counter_inc(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 		genpd_lock_nested(parent, depth + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		ret = genpd_power_on(parent, depth + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 		genpd_unlock(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 			genpd_sd_counter_dec(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	ret = _genpd_power_on(genpd, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	genpd->status = GENPD_STATE_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	genpd_update_accounting(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699)  err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	list_for_each_entry_continue_reverse(link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 					&genpd->child_links,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 					child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		genpd_sd_counter_dec(link->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		genpd_lock_nested(link->parent, depth + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 		genpd_power_off(link->parent, false, depth + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		genpd_unlock(link->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) static int genpd_dev_pm_start(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	return genpd_start_dev(genpd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 				     unsigned long val, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	struct generic_pm_domain_data *gpd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	dev = gpd_data->base.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		struct pm_domain_data *pdd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		spin_lock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		pdd = dev->power.subsys_data ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 				dev->power.subsys_data->domain_data : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		if (pdd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 			to_gpd_data(pdd)->td.constraint_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 			genpd = dev_to_genpd(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 			genpd = ERR_PTR(-ENODATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		if (!IS_ERR(genpd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 			genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 			genpd->max_off_time_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 			genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		dev = dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		if (!dev || dev->power.ignore_children)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760)  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761)  * @work: Work structure used for scheduling the execution of this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) static void genpd_power_off_work_fn(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	genpd_power_off(genpd, false, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775)  * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776)  * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) static int __genpd_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	int (*cb)(struct device *__dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	if (dev->type && dev->type->pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		cb = dev->type->pm->runtime_suspend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	else if (dev->class && dev->class->pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		cb = dev->class->pm->runtime_suspend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	else if (dev->bus && dev->bus->pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		cb = dev->bus->pm->runtime_suspend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		cb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	if (!cb && dev->driver && dev->driver->pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		cb = dev->driver->pm->runtime_suspend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	return cb ? cb(dev) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798)  * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799)  * @dev: Device to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) static int __genpd_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	int (*cb)(struct device *__dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	if (dev->type && dev->type->pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		cb = dev->type->pm->runtime_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	else if (dev->class && dev->class->pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		cb = dev->class->pm->runtime_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	else if (dev->bus && dev->bus->pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		cb = dev->bus->pm->runtime_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		cb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	if (!cb && dev->driver && dev->driver->pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		cb = dev->driver->pm->runtime_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	return cb ? cb(dev) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821)  * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822)  * @dev: Device to suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824)  * Carry out a runtime suspend of a device under the assumption that its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825)  * pm_domain field points to the domain member of an object of type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826)  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) static int genpd_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	bool (*suspend_ok)(struct device *__dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	bool runtime_pm = pm_runtime_enabled(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	ktime_t time_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	s64 elapsed_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	dev_dbg(dev, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	genpd = dev_to_genpd(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	if (IS_ERR(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	 * A runtime PM centric subsystem/driver may re-use the runtime PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	 * callbacks for other purposes than runtime PM. In those scenarios
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	 * runtime PM is disabled. Under these circumstances, we shall skip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	 * validating/measuring the PM QoS latency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	if (runtime_pm && suspend_ok && !suspend_ok(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	/* Measure suspend latency. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	time_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	if (runtime_pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		time_start = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	ret = __genpd_runtime_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	ret = genpd_stop_dev(genpd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		__genpd_runtime_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	/* Update suspend latency value if the measured time exceeds it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	if (runtime_pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		if (elapsed_ns > td->suspend_latency_ns) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 			td->suspend_latency_ns = elapsed_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 				elapsed_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 			genpd->max_off_time_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			td->constraint_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	 * If power.irq_safe is set, this routine may be run with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	genpd_power_off(genpd, true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896)  * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897)  * @dev: Device to resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899)  * Carry out a runtime resume of a device under the assumption that its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900)  * pm_domain field points to the domain member of an object of type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901)  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) static int genpd_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	bool runtime_pm = pm_runtime_enabled(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	ktime_t time_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	s64 elapsed_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	bool timed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	dev_dbg(dev, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	genpd = dev_to_genpd(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	if (IS_ERR(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	 * As we don't power off a non IRQ safe domain, which holds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	 * an IRQ safe device, we don't need to restore power to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		timed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	ret = genpd_power_on(genpd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	/* Measure resume latency. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	time_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	if (timed && runtime_pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		time_start = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	ret = genpd_start_dev(genpd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		goto err_poweroff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	ret = __genpd_runtime_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		goto err_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	/* Update resume latency value if the measured time exceeds it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	if (timed && runtime_pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		if (elapsed_ns > td->resume_latency_ns) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 			td->resume_latency_ns = elapsed_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 				elapsed_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			genpd->max_off_time_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 			td->constraint_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) err_stop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	genpd_stop_dev(genpd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) err_poweroff:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	if (!pm_runtime_is_irq_safe(dev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		genpd_power_off(genpd, true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) static bool pd_ignore_unused;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) static int __init pd_ignore_unused_setup(char *__unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	pd_ignore_unused = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) __setup("pd_ignore_unused", pd_ignore_unused_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985)  * genpd_power_off_unused - Power off all PM domains with no devices in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) static int __init genpd_power_off_unused(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	if (pd_ignore_unused) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		pr_warn("genpd: Not disabling unused power domains\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	mutex_lock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		genpd_queue_power_off_work(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	mutex_unlock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) late_initcall(genpd_power_off_unused);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)  * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)  * @genpd: PM domain to power off, if possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)  * @use_lock: use the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)  * @depth: nesting count for lockdep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)  * Check if the given PM domain can be powered off (during system suspend or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)  * hibernation) and do that if so.  Also, in that case propagate to its parents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)  * This function is only called in "noirq" and "syscore" stages of system power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)  * these cases the lock must be held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 				 unsigned int depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	struct gpd_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	if (genpd->suspended_count != genpd->device_count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	    || atomic_read(&genpd->sd_count) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	/* Choose the deepest state when suspending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	genpd->state_idx = genpd->state_count - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	if (_genpd_power_off(genpd, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	genpd->status = GENPD_STATE_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	list_for_each_entry(link, &genpd->child_links, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		genpd_sd_counter_dec(link->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		if (use_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			genpd_lock_nested(link->parent, depth + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		genpd_sync_power_off(link->parent, use_lock, depth + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		if (use_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			genpd_unlock(link->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)  * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)  * @genpd: PM domain to power on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)  * @use_lock: use the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)  * @depth: nesting count for lockdep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)  * This function is only called in "noirq" and "syscore" stages of system power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)  * these cases the lock must be held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 				unsigned int depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	struct gpd_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	if (genpd_status_on(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	list_for_each_entry(link, &genpd->child_links, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		genpd_sd_counter_inc(link->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		if (use_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			genpd_lock_nested(link->parent, depth + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		genpd_sync_power_on(link->parent, use_lock, depth + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		if (use_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 			genpd_unlock(link->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	_genpd_power_on(genpd, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	genpd->status = GENPD_STATE_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)  * resume_needed - Check whether to resume a device before system suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)  * @dev: Device to check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)  * @genpd: PM domain the device belongs to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)  * There are two cases in which a device that can wake up the system from sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)  * states should be resumed by genpd_prepare(): (1) if the device is enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)  * to wake up the system and it has to remain active for this purpose while the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)  * system is in the sleep state and (2) if the device is not enabled to wake up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)  * the system from sleep states and it generally doesn't generate wakeup signals
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)  * by itself (those signals are generated on its behalf by other parts of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)  * system).  In the latter case it may be necessary to reconfigure the device's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)  * wakeup settings during system suspend, because it may have been set up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)  * signal remote wakeup from the system's working state as needed by runtime PM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)  * Return 'true' in either of the above cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) static bool resume_needed(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			  const struct generic_pm_domain *genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	bool active_wakeup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	if (!device_can_wakeup(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	active_wakeup = genpd_is_active_wakeup(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)  * genpd_prepare - Start power transition of a device in a PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)  * @dev: Device to start the transition of.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)  * Start a power transition of a device (during a system-wide power transition)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)  * under the assumption that its pm_domain field points to the domain member of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)  * an object of type struct generic_pm_domain representing a PM domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)  * consisting of I/O devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static int genpd_prepare(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	dev_dbg(dev, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	genpd = dev_to_genpd(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	if (IS_ERR(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	 * If a wakeup request is pending for the device, it should be woken up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	 * at this point and a system wakeup event should be reported if it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	 * set up to wake up the system from sleep states.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	if (resume_needed(dev, genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		pm_runtime_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	if (genpd->prepared_count++ == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		genpd->suspended_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	ret = pm_generic_prepare(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		genpd->prepared_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	/* Never return 1, as genpd don't cope with the direct_complete path. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	return ret >= 0 ? 0 : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)  * genpd_finish_suspend - Completion of suspend or hibernation of device in an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)  *   I/O pm domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)  * @dev: Device to suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)  * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)  * Stop the device and remove power from the domain if all devices in it have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)  * been stopped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static int genpd_finish_suspend(struct device *dev, bool poweroff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	genpd = dev_to_genpd(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	if (IS_ERR(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	if (poweroff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		ret = pm_generic_poweroff_noirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		ret = pm_generic_suspend_noirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	    !pm_runtime_status_suspended(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		ret = genpd_stop_dev(genpd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 			if (poweroff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 				pm_generic_restore_noirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 				pm_generic_resume_noirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	genpd->suspended_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	genpd_sync_power_off(genpd, true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)  * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)  * @dev: Device to suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)  * Stop the device and remove power from the domain if all devices in it have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)  * been stopped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) static int genpd_suspend_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	dev_dbg(dev, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	return genpd_finish_suspend(dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)  * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)  * @dev: Device to resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)  * Restore power to the device's PM domain, if necessary, and start the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) static int genpd_resume_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	dev_dbg(dev, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	genpd = dev_to_genpd(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	if (IS_ERR(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		return pm_generic_resume_noirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	genpd_sync_power_on(genpd, true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	genpd->suspended_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	    !pm_runtime_status_suspended(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		ret = genpd_start_dev(genpd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	return pm_generic_resume_noirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)  * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)  * @dev: Device to freeze.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)  * Carry out a late freeze of a device under the assumption that its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)  * pm_domain field points to the domain member of an object of type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)  * struct generic_pm_domain representing a power domain consisting of I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)  * devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) static int genpd_freeze_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	const struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	dev_dbg(dev, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	genpd = dev_to_genpd(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	if (IS_ERR(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	ret = pm_generic_freeze_noirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	    !pm_runtime_status_suspended(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		ret = genpd_stop_dev(genpd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)  * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)  * @dev: Device to thaw.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)  * Start the device, unless power has been removed from the domain already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)  * before the system transition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) static int genpd_thaw_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	const struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	dev_dbg(dev, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	genpd = dev_to_genpd(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	if (IS_ERR(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	    !pm_runtime_status_suspended(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		ret = genpd_start_dev(genpd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	return pm_generic_thaw_noirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)  * genpd_poweroff_noirq - Completion of hibernation of device in an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)  *   I/O PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)  * @dev: Device to poweroff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)  * Stop the device and remove power from the domain if all devices in it have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)  * been stopped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) static int genpd_poweroff_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	dev_dbg(dev, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	return genpd_finish_suspend(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)  * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)  * @dev: Device to resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)  * Make sure the domain will be in the same power state as before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)  * hibernation the system is resuming from and start the device if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) static int genpd_restore_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	dev_dbg(dev, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	genpd = dev_to_genpd(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	if (IS_ERR(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	 * At this point suspended_count == 0 means we are being run for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	 * first time for the given domain in the present cycle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	if (genpd->suspended_count++ == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		 * The boot kernel might put the domain into arbitrary state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		 * so make it appear as powered off to genpd_sync_power_on(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		 * so that it tries to power it on in case it was really off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		genpd->status = GENPD_STATE_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	genpd_sync_power_on(genpd, true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	    !pm_runtime_status_suspended(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		ret = genpd_start_dev(genpd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	return pm_generic_restore_noirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)  * genpd_complete - Complete power transition of a device in a power domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)  * @dev: Device to complete the transition of.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)  * Complete a power transition of a device (during a system-wide power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)  * transition) under the assumption that its pm_domain field points to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)  * domain member of an object of type struct generic_pm_domain representing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)  * a power domain consisting of I/O devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) static void genpd_complete(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	dev_dbg(dev, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	genpd = dev_to_genpd(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	if (IS_ERR(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	pm_generic_complete(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	genpd->prepared_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	if (!genpd->prepared_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		genpd_queue_power_off_work(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) static void genpd_switch_state(struct device *dev, bool suspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	bool use_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	genpd = dev_to_genpd_safe(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	if (!genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	use_lock = genpd_is_irq_safe(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	if (use_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	if (suspend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		genpd->suspended_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		genpd_sync_power_off(genpd, use_lock, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		genpd_sync_power_on(genpd, use_lock, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		genpd->suspended_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	if (use_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)  * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)  * @dev: The device that is attached to the genpd, that can be suspended.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)  * This routine should typically be called for a device that needs to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)  * suspended during the syscore suspend phase. It may also be called during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)  * suspend-to-idle to suspend a corresponding CPU device that is attached to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)  * genpd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) void dev_pm_genpd_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	genpd_switch_state(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)  * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)  * @dev: The device that is attached to the genpd, which needs to be resumed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)  * This routine should typically be called for a device that needs to be resumed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)  * during the syscore resume phase. It may also be called during suspend-to-idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)  * to resume a corresponding CPU device that is attached to a genpd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) void dev_pm_genpd_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	genpd_switch_state(dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) #else /* !CONFIG_PM_SLEEP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) #define genpd_prepare		NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) #define genpd_suspend_noirq	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) #define genpd_resume_noirq	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) #define genpd_freeze_noirq	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) #define genpd_thaw_noirq	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) #define genpd_poweroff_noirq	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) #define genpd_restore_noirq	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) #define genpd_complete		NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) #endif /* CONFIG_PM_SLEEP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	struct generic_pm_domain_data *gpd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	ret = dev_pm_get_subsys_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	if (!gpd_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	gpd_data->base.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	gpd_data->td.constraint_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	gpd_data->next_wakeup = KTIME_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	spin_lock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	if (dev->power.subsys_data->domain_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	dev->power.subsys_data->domain_data = &gpd_data->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	return gpd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)  err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	kfree(gpd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)  err_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	dev_pm_put_subsys_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) static void genpd_free_dev_data(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 				struct generic_pm_domain_data *gpd_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	spin_lock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	dev->power.subsys_data->domain_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	spin_unlock_irq(&dev->power.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	kfree(gpd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	dev_pm_put_subsys_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) static void genpd_update_cpumask(struct generic_pm_domain *genpd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 				 int cpu, bool set, unsigned int depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	struct gpd_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	if (!genpd_is_cpu_domain(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	list_for_each_entry(link, &genpd->child_links, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		struct generic_pm_domain *parent = link->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		genpd_lock_nested(parent, depth + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		genpd_update_cpumask(parent, cpu, set, depth + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		genpd_unlock(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	if (set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		cpumask_set_cpu(cpu, genpd->cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		cpumask_clear_cpu(cpu, genpd->cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	if (cpu >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		genpd_update_cpumask(genpd, cpu, true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	if (cpu >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		genpd_update_cpumask(genpd, cpu, false, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	if (!genpd_is_cpu_domain(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		if (get_cpu_device(cpu) == dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 			return cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 			    struct device *base_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	struct generic_pm_domain_data *gpd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	dev_dbg(dev, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	gpd_data = genpd_alloc_dev_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	if (IS_ERR(gpd_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		return PTR_ERR(gpd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	genpd_set_cpumask(genpd, gpd_data->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	dev_pm_domain_set(dev, &genpd->domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	genpd->device_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	genpd->max_off_time_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		genpd_free_dev_data(dev, gpd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 					DEV_PM_QOS_RESUME_LATENCY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)  * pm_genpd_add_device - Add a device to an I/O PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)  * @genpd: PM domain to add the device to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)  * @dev: Device to be added.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	mutex_lock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	ret = genpd_add_device(genpd, dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	mutex_unlock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) EXPORT_SYMBOL_GPL(pm_genpd_add_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) static int genpd_remove_device(struct generic_pm_domain *genpd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 			       struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	struct generic_pm_domain_data *gpd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	struct pm_domain_data *pdd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	dev_dbg(dev, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	pdd = dev->power.subsys_data->domain_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	gpd_data = to_gpd_data(pdd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 				   DEV_PM_QOS_RESUME_LATENCY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	if (genpd->prepared_count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	genpd->device_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	genpd->max_off_time_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	genpd_clear_cpumask(genpd, gpd_data->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	dev_pm_domain_set(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	list_del_init(&pdd->list_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	if (genpd->detach_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		genpd->detach_dev(genpd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	genpd_free_dev_data(dev, gpd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)  * @dev: Device to be removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) int pm_genpd_remove_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	if (!genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	return genpd_remove_device(genpd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)  * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)  * @dev: Device that should be associated with the notifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)  * @nb: The notifier block to register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)  * Users may call this function to add a genpd power on/off notifier for an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)  * attached @dev. Only one notifier per device is allowed. The notifier is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)  * sent when genpd is powering on/off the PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)  * It is assumed that the user guarantee that the genpd wouldn't be detached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)  * while this routine is getting called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)  * Returns 0 on success and negative error values on failures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	struct generic_pm_domain_data *gpd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	genpd = dev_to_genpd_safe(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	if (!genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	if (WARN_ON(!dev->power.subsys_data ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		     !dev->power.subsys_data->domain_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	if (gpd_data->power_nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		dev_warn(dev, "failed to add notifier for PM domain %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 			 genpd->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	gpd_data->power_nb = nb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)  * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)  * @dev: Device that is associated with the notifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)  * Users may call this function to remove a genpd power on/off notifier for an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)  * attached @dev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)  * It is assumed that the user guarantee that the genpd wouldn't be detached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)  * while this routine is getting called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)  * Returns 0 on success and negative error values on failures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) int dev_pm_genpd_remove_notifier(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	struct generic_pm_domain_data *gpd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	genpd = dev_to_genpd_safe(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	if (!genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	if (WARN_ON(!dev->power.subsys_data ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		     !dev->power.subsys_data->domain_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	if (!gpd_data->power_nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 					    gpd_data->power_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 			 genpd->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	gpd_data->power_nb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) static int genpd_add_subdomain(struct generic_pm_domain *genpd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 			       struct generic_pm_domain *subdomain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	struct gpd_link *link, *itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	    || genpd == subdomain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	 * If the domain can be powered on/off in an IRQ safe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	 * context, ensure that the subdomain can also be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	 * powered on/off in that context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 				genpd->name, subdomain->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	link = kzalloc(sizeof(*link), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	if (!link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	genpd_lock(subdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		if (itr->child == subdomain && itr->parent == genpd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	link->parent = genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	list_add_tail(&link->parent_node, &genpd->parent_links);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	link->child = subdomain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	list_add_tail(&link->child_node, &subdomain->child_links);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	if (genpd_status_on(subdomain))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		genpd_sd_counter_inc(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	genpd_unlock(subdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		kfree(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)  * @genpd: Leader PM domain to add the subdomain to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)  * @subdomain: Subdomain to be added.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 			   struct generic_pm_domain *subdomain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	mutex_lock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	ret = genpd_add_subdomain(genpd, subdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	mutex_unlock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)  * @genpd: Leader PM domain to remove the subdomain from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)  * @subdomain: Subdomain to be removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 			      struct generic_pm_domain *subdomain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	struct gpd_link *l, *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	genpd_lock(subdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		pr_warn("%s: unable to remove subdomain %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 			genpd->name, subdomain->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 		if (link->child != subdomain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		list_del(&link->parent_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		list_del(&link->child_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		kfree(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 		if (genpd_status_on(subdomain))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 			genpd_sd_counter_dec(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	genpd_unlock(subdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) static void genpd_free_default_power_state(struct genpd_power_state *states,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 					   unsigned int state_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	kfree(states);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	struct genpd_power_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	state = kzalloc(sizeof(*state), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	if (!state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	genpd->states = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	genpd->state_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	genpd->free_states = genpd_free_default_power_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) static void genpd_lock_init(struct generic_pm_domain *genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 		spin_lock_init(&genpd->slock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		genpd->lock_ops = &genpd_spin_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 		mutex_init(&genpd->mlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 		genpd->lock_ops = &genpd_mtx_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)  * pm_genpd_init - Initialize a generic I/O PM domain object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)  * @genpd: PM domain object to initialize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)  * @gov: PM domain governor to associate with the domain (may be NULL).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)  * @is_off: Initial value of the domain's power_is_off field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)  * Returns 0 on successful initialization, else a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) int pm_genpd_init(struct generic_pm_domain *genpd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		  struct dev_power_governor *gov, bool is_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	if (IS_ERR_OR_NULL(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	INIT_LIST_HEAD(&genpd->parent_links);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	INIT_LIST_HEAD(&genpd->child_links);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	INIT_LIST_HEAD(&genpd->dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	genpd_lock_init(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	genpd->gov = gov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	atomic_set(&genpd->sd_count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	genpd->device_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	genpd->max_off_time_ns = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	genpd->max_off_time_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	genpd->provider = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	genpd->has_provider = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	genpd->accounting_time = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	genpd->domain.ops.prepare = genpd_prepare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	genpd->domain.ops.complete = genpd_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	genpd->domain.start = genpd_dev_pm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		genpd->dev_ops.stop = pm_clk_suspend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		genpd->dev_ops.start = pm_clk_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	/* Always-on domains must be powered on at initialization. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 			!genpd_status_on(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	if (genpd_is_cpu_domain(genpd) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	/* Use only one "off" state if there were no states declared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	if (genpd->state_count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		ret = genpd_set_default_power_state(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 			if (genpd_is_cpu_domain(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 				free_cpumask_var(genpd->cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	} else if (!gov && genpd->state_count > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		pr_warn("%s: no governor for states\n", genpd->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	device_initialize(&genpd->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	dev_set_name(&genpd->dev, "%s", genpd->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	mutex_lock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	list_add(&genpd->gpd_list_node, &gpd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	genpd_debug_add(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	mutex_unlock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) EXPORT_SYMBOL_GPL(pm_genpd_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) static int genpd_remove(struct generic_pm_domain *genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	struct gpd_link *l, *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	if (IS_ERR_OR_NULL(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	if (genpd->has_provider) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 		genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		pr_err("Provider present, unable to remove %s\n", genpd->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		list_del(&link->parent_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		list_del(&link->child_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		kfree(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	genpd_debug_remove(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	list_del(&genpd->gpd_list_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	cancel_work_sync(&genpd->power_off_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	if (genpd_is_cpu_domain(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		free_cpumask_var(genpd->cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	if (genpd->free_states)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		genpd->free_states(genpd->states, genpd->state_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	pr_debug("%s: removed %s\n", __func__, genpd->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)  * pm_genpd_remove - Remove a generic I/O PM domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)  * @genpd: Pointer to PM domain that is to be removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)  * To remove the PM domain, this function:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)  *  - Removes the PM domain as a subdomain to any parent domains,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)  *    if it was added.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)  *  - Removes the PM domain from the list of registered PM domains.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)  * The PM domain will only be removed, if the associated provider has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)  * been removed, it is not a parent to any other PM domain and has no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)  * devices associated with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) int pm_genpd_remove(struct generic_pm_domain *genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	mutex_lock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	ret = genpd_remove(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	mutex_unlock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) EXPORT_SYMBOL_GPL(pm_genpd_remove);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)  * Device Tree based PM domain providers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)  * The code below implements generic device tree based PM domain providers that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)  * bind device tree nodes with generic PM domains registered in the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)  * Any driver that registers generic PM domains and needs to support binding of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)  * devices to these domains is supposed to register a PM domain provider, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)  * maps a PM domain specifier retrieved from the device tree to a PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)  * Two simple mapping functions have been provided for convenience:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)  *    index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)  * struct of_genpd_provider - PM domain provider registration structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)  * @link: Entry in global list of PM domain providers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)  * @node: Pointer to device tree node of PM domain provider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)  *         into a PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)  * @data: context pointer to be passed into @xlate callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) struct of_genpd_provider {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	struct list_head link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	struct device_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	genpd_xlate_t xlate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) /* List of registered PM domain providers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) static LIST_HEAD(of_genpd_providers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) /* Mutex to protect the list above. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) static DEFINE_MUTEX(of_genpd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)  * @genpdspec: OF phandle args to map into a PM domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)  * @data: xlate function private data - pointer to struct generic_pm_domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)  * This is a generic xlate function that can be used to model PM domains that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)  * have their own device tree nodes. The private data of xlate function needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)  * to be a valid pointer to struct generic_pm_domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) static struct generic_pm_domain *genpd_xlate_simple(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 					struct of_phandle_args *genpdspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 					void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	return data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)  * genpd_xlate_onecell() - Xlate function using a single index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)  * @genpdspec: OF phandle args to map into a PM domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)  * @data: xlate function private data - pointer to struct genpd_onecell_data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)  * This is a generic xlate function that can be used to model simple PM domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)  * controllers that have one device tree node and provide multiple PM domains.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)  * A single cell is used as an index into an array of PM domains specified in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)  * the genpd_onecell_data struct when registering the provider.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) static struct generic_pm_domain *genpd_xlate_onecell(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 					struct of_phandle_args *genpdspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 					void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	struct genpd_onecell_data *genpd_data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	unsigned int idx = genpdspec->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	if (genpdspec->args_count != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	if (idx >= genpd_data->num_domains) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 		pr_err("%s: invalid domain index %u\n", __func__, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	if (!genpd_data->domains[idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	return genpd_data->domains[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)  * genpd_add_provider() - Register a PM domain provider for a node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)  * @np: Device node pointer associated with the PM domain provider.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)  * @xlate: Callback for decoding PM domain from phandle arguments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)  * @data: Context pointer for @xlate callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 			      void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	struct of_genpd_provider *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	if (!cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	cp->node = of_node_get(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	cp->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	cp->xlate = xlate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	fwnode_dev_initialized(&np->fwnode, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	mutex_lock(&of_genpd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	list_add(&cp->link, &of_genpd_providers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	mutex_unlock(&of_genpd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	pr_debug("Added domain provider from %pOF\n", np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) static bool genpd_present(const struct generic_pm_domain *genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	const struct generic_pm_domain *gpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 		if (gpd == genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 
/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 *
 * The domain must already be present on gpd_list (i.e. initialized) before
 * it can be exposed through a provider.  If the domain implements
 * ->set_performance_state(), its OPP table is parsed from @np and cached in
 * genpd->opp_table for later performance-state handling.
 *
 * Return: 0 on success, -EINVAL on bad arguments or an unregistered domain,
 * -EPROBE_DEFER if the OPP table is not available yet, or another negative
 * error code on failure.
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret = -EINVAL;

	if (!np || !genpd)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	/* Refuse to expose a domain that was never registered. */
	if (!genpd_present(genpd))
		goto unlock;

	genpd->dev.of_node = np;

	/* Parse genpd OPP table */
	if (genpd->set_performance_state) {
		ret = dev_pm_opp_of_add_table(&genpd->dev);
		if (ret) {
			if (ret != -EPROBE_DEFER)
				dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
					ret);
			goto unlock;
		}

		/*
		 * Save table for faster processing while setting performance
		 * state.
		 */
		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
		WARN_ON(IS_ERR(genpd->opp_table));
	}

	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
	if (ret) {
		/* Roll back the OPP table setup done above. */
		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}

		goto unlock;
	}

	genpd->provider = &np->fwnode;
	genpd->has_provider = true;

unlock:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 
/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 *
 * Every non-NULL entry in @data->domains must already be registered; NULL
 * entries (holes in the index space) are skipped.  If @data->xlate is NULL,
 * the default genpd_xlate_onecell translation is installed.  For each
 * domain implementing ->set_performance_state(), an OPP table indexed by
 * the domain's position is parsed and cached.
 *
 * Return: 0 on success or a negative error code.  On failure all domains
 * processed so far are rolled back to their pre-call provider state.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	struct generic_pm_domain *genpd;
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		genpd = data->domains[i];

		/* Holes in the domains array are allowed. */
		if (!genpd)
			continue;
		/* A non-NULL entry must have been registered beforehand. */
		if (!genpd_present(genpd))
			goto error;

		genpd->dev.of_node = np;

		/* Parse genpd OPP table */
		if (genpd->set_performance_state) {
			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
			if (ret) {
				if (ret != -EPROBE_DEFER)
					dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
						i, ret);
				goto error;
			}

			/*
			 * Save table for faster processing while setting
			 * performance state.
			 */
			genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
			WARN_ON(IS_ERR(genpd->opp_table));
		}

		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	mutex_unlock(&gpd_list_lock);

	return 0;

error:
	/* Unwind only the entries handled before the failure (indices < i). */
	while (i--) {
		genpd = data->domains[i];

		if (!genpd)
			continue;

		genpd->provider = NULL;
		genpd->has_provider = false;

		/* Release the cached OPP table reference and drop the table. */
		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}
	}

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 
/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 *
 * Unlinks the provider entry for @np (if any), clears 'has_provider' on all
 * domains that were exposed through it, and releases the OPP resources of
 * domains that cached an OPP table.
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;
	struct generic_pm_domain *gpd;

	/* Lock order: gpd_list_lock is always taken before of_genpd_mutex. */
	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
				if (gpd->provider == &np->fwnode) {
					gpd->has_provider = false;

					if (!gpd->set_performance_state)
						continue;

					/* Undo the OPP setup done at registration. */
					dev_pm_opp_put_opp_table(gpd->opp_table);
					dev_pm_opp_of_remove_table(&gpd->dev);
				}
			}

			/* Drop the node reference taken by genpd_add_provider(). */
			fwnode_dev_initialized(&cp->node->fwnode, false);
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)  * genpd_get_from_provider() - Look-up PM domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)  * @genpdspec: OF phandle args to use for look-up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)  * Looks for a PM domain provider under the node specified by @genpdspec and if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)  * found, uses xlate function of the provider to map phandle args to a PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)  * domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)  * on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) static struct generic_pm_domain *genpd_get_from_provider(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 					struct of_phandle_args *genpdspec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	struct of_genpd_provider *provider;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	if (!genpdspec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	mutex_lock(&of_genpd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	/* Check if we have such a provider in our array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	list_for_each_entry(provider, &of_genpd_providers, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		if (provider->node == genpdspec->np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 			genpd = provider->xlate(genpdspec, provider->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 		if (!IS_ERR(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	mutex_unlock(&of_genpd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	return genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)  * of_genpd_add_device() - Add a device to an I/O PM domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436)  * @genpdspec: OF phandle args to use for look-up PM domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)  * @dev: Device to be added.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)  * Looks-up an I/O PM domain based upon phandle args provided and adds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)  * the device to the PM domain. Returns a negative error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	mutex_lock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	genpd = genpd_get_from_provider(genpdspec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	if (IS_ERR(genpd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 		ret = PTR_ERR(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	ret = genpd_add_device(genpd, dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	mutex_unlock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) EXPORT_SYMBOL_GPL(of_genpd_add_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466)  * @parent_spec: OF phandle args to use for parent PM domain look-up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)  * @subdomain_spec: OF phandle args to use for subdomain look-up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)  * Looks-up a parent PM domain and subdomain based upon phandle args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)  * provided and adds the subdomain to the parent PM domain. Returns a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)  * negative error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 			   struct of_phandle_args *subdomain_spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	struct generic_pm_domain *parent, *subdomain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	mutex_lock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	parent = genpd_get_from_provider(parent_spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	if (IS_ERR(parent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 		ret = PTR_ERR(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	subdomain = genpd_get_from_provider(subdomain_spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	if (IS_ERR(subdomain)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 		ret = PTR_ERR(subdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	ret = genpd_add_subdomain(parent, subdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	mutex_unlock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)  * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)  * @parent_spec: OF phandle args to use for parent PM domain look-up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)  * @subdomain_spec: OF phandle args to use for subdomain look-up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)  * Looks-up a parent PM domain and subdomain based upon phandle args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)  * provided and removes the subdomain from the parent PM domain. Returns a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)  * negative error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 			      struct of_phandle_args *subdomain_spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	struct generic_pm_domain *parent, *subdomain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	mutex_lock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	parent = genpd_get_from_provider(parent_spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	if (IS_ERR(parent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 		ret = PTR_ERR(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	subdomain = genpd_get_from_provider(subdomain_spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	if (IS_ERR(subdomain)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 		ret = PTR_ERR(subdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	ret = pm_genpd_remove_subdomain(parent, subdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	mutex_unlock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541)  * of_genpd_remove_last - Remove the last PM domain registered for a provider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)  * @provider: Pointer to device structure associated with provider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544)  * Find the last PM domain that was added by a particular provider and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)  * remove this PM domain from the list of PM domains. The provider is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)  * identified by the 'provider' device structure that is passed. The PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)  * domain will only be removed, if the provider associated with domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)  * has been removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)  * Returns a valid pointer to struct generic_pm_domain on success or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551)  * ERR_PTR() on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	if (IS_ERR_OR_NULL(np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	mutex_lock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 		if (gpd->provider == &np->fwnode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 			ret = genpd_remove(gpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 			genpd = ret ? ERR_PTR(ret) : gpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	mutex_unlock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	return genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) EXPORT_SYMBOL_GPL(of_genpd_remove_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 
/*
 * Release callback for devices allocated by genpd: drop the reference held
 * on dev->of_node, then free the device structure itself.
 */
static void genpd_release_dev(struct device *dev)
{
	of_node_put(dev->of_node);
	kfree(dev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 
/* Bus used to recognize devices created by genpd itself (see detach path). */
static struct bus_type genpd_bus_type = {
	.name		= "genpd",
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)  * genpd_dev_pm_detach - Detach a device from its PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587)  * @dev: Device to detach.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)  * @power_off: Currently not used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)  * Try to locate a corresponding generic PM domain, which the device was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)  * attached to previously. If such is found, the device is detached from it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) static void genpd_dev_pm_detach(struct device *dev, bool power_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	struct generic_pm_domain *pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	pd = dev_to_genpd(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	if (IS_ERR(pd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 		ret = genpd_remove_device(pd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 		if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 		mdelay(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 		dev_err(dev, "failed to remove from PM domain %s: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 			pd->name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	/* Check if PM domain can be powered off after removing this device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	genpd_queue_power_off_work(pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	/* Unregister the device if it was created by genpd. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	if (dev->bus == &genpd_bus_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 		device_unregister(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 
/* Queue a power-off check for the PM domain @dev belongs to, if any. */
static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd = dev_to_genpd(dev);

	if (!IS_ERR(pd))
		genpd_queue_power_off_work(pd);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 				 unsigned int index, bool power_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	struct of_phandle_args pd_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	struct generic_pm_domain *pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 				"#power-domain-cells", index, &pd_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	mutex_lock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 	pd = genpd_get_from_provider(&pd_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	of_node_put(pd_args.np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 	if (IS_ERR(pd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 		mutex_unlock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 			__func__, PTR_ERR(pd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 		return driver_deferred_probe_check_state(base_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	ret = genpd_add_device(pd, dev, base_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 	mutex_unlock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 		if (ret != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 			dev_err(dev, "failed to add to PM domain %s: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 				pd->name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 	dev->pm_domain->detach = genpd_dev_pm_detach;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	dev->pm_domain->sync = genpd_dev_pm_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	if (power_on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 		genpd_lock(pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 		ret = genpd_power_on(pd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 		genpd_unlock(pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 		genpd_remove_device(pd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	return ret ? -EPROBE_DEFER : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690)  * @dev: Device to attach.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)  * Parse device's OF node to find a PM domain specifier. If such is found,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)  * attaches the device to retrieved pm_domain ops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)  * Returns 1 on successfully attached PM domain, 0 when the device don't need a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)  * PM domain or when multiple power-domains exists for it, else a negative error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697)  * code. Note that if a power-domain exists for the device, but it cannot be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)  * found or turned on, then return -EPROBE_DEFER to ensure that the device is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699)  * not probed and to re-try again later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) int genpd_dev_pm_attach(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	if (!dev->of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	 * Devices with multiple PM domains must be attached separately, as we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	 * can only attach one PM domain per device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	if (of_count_phandle_with_args(dev->of_node, "power-domains",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 				       "#power-domain-cells") != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	return __genpd_dev_pm_attach(dev, dev, 0, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719)  * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720)  * @dev: The device used to lookup the PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)  * @index: The index of the PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723)  * Parse device's OF node to find a PM domain specifier at the provided @index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)  * If such is found, creates a virtual device and attaches it to the retrieved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725)  * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)  * callback in the struct dev_pm_domain are assigned to genpd_dev_pm_detach().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)  * Returns the created virtual device if successfully attached PM domain, NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)  * when the device don't need a PM domain, else an ERR_PTR() in case of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)  * failures. If a power-domain exists for the device, but cannot be found or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)  * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)  * is not probed and to re-try again later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) struct device *genpd_dev_pm_attach_by_id(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 					 unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	struct device *virt_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	int num_domains;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	if (!dev->of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	/* Verify that the index is within a valid range. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 						 "#power-domain-cells");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	if (index >= num_domains)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	/* Allocate and register device on the genpd bus. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 	if (!virt_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	virt_dev->bus = &genpd_bus_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 	virt_dev->release = genpd_release_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 	virt_dev->of_node = of_node_get(dev->of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 	ret = device_register(virt_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 		put_device(virt_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 		return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 	/* Try to attach the device to the PM domain at the specified index. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	if (ret < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 		device_unregister(virt_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 		return ret ? ERR_PTR(ret) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	pm_runtime_enable(virt_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	return virt_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)  * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782)  * @dev: The device used to lookup the PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)  * @name: The name of the PM domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)  * Parse device's OF node to find a PM domain specifier using the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)  * power-domain-names DT property. For further description see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)  * genpd_dev_pm_attach_by_id().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	if (!dev->of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	index = of_property_match_string(dev->of_node, "power-domain-names",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 					 name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	if (index < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 	return genpd_dev_pm_attach_by_id(dev, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) static const struct of_device_id idle_state_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	{ .compatible = "domain-idle-state", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) static int genpd_parse_state(struct genpd_power_state *genpd_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 				    struct device_node *state_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 	u32 residency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	u32 entry_latency, exit_latency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 	err = of_property_read_u32(state_node, "entry-latency-us",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 						&entry_latency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 		pr_debug(" * %pOF missing entry-latency-us property\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 			 state_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 	err = of_property_read_u32(state_node, "exit-latency-us",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 						&exit_latency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 		pr_debug(" * %pOF missing exit-latency-us property\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 			 state_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 		genpd_state->residency_ns = 1000 * residency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 	genpd_state->power_on_latency_ns = 1000 * exit_latency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 	genpd_state->power_off_latency_ns = 1000 * entry_latency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	genpd_state->fwnode = &state_node->fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) static int genpd_iterate_idle_states(struct device_node *dn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 				     struct genpd_power_state *states)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	struct of_phandle_iterator it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 		return ret == -ENOENT ? 0 : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	/* Loop over the phandles until all the requested entry is found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 		np = it.node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 		if (!of_match_node(idle_state_match, np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 		if (states) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 			ret = genpd_parse_state(&states[i], np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 				pr_err("Parsing idle state node %pOF failed with err %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 				       np, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 				of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 		i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)  * of_genpd_parse_idle_states: Return array of idle states for the genpd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878)  * @dn: The genpd device node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)  * @states: The pointer to which the state array will be saved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)  * @n: The count of elements in the array returned from this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)  * Returns the device states parsed from the OF node. The memory for the states
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883)  * is allocated by this function and is the responsibility of the caller to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)  * free the memory after use. If any or zero compatible domain idle states is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)  * found it returns 0 and in case of errors, a negative error code is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) int of_genpd_parse_idle_states(struct device_node *dn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 			struct genpd_power_state **states, int *n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 	struct genpd_power_state *st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	ret = genpd_iterate_idle_states(dn, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 		*states = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 		*n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 	if (!st)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 	ret = genpd_iterate_idle_states(dn, st);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 	if (ret <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 		kfree(st);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 		return ret < 0 ? ret : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 	*states = st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	*n = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921)  * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923)  * @genpd_dev: Genpd's device for which the performance-state needs to be found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)  * @opp: struct dev_pm_opp of the OPP for which we need to find performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925)  *	state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)  * Returns performance state encoded in the OPP of the genpd. This calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)  * platform specific genpd->opp_to_performance_state() callback to translate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)  * power domain OPP to performance state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931)  * Returns performance state on success and 0 on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 					       struct dev_pm_opp *opp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 	struct generic_pm_domain *genpd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 	int state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 	if (unlikely(!genpd->opp_to_performance_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 	genpd_lock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 	state = genpd->opp_to_performance_state(genpd, opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 	return state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) static int __init genpd_bus_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	return bus_register(&genpd_bus_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) core_initcall(genpd_bus_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) /***        debugfs support        ***/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965)  * TODO: This function is a slightly modified version of rtpm_status_show
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966)  * from sysfs.c, so generalize it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) static void rtpm_status_str(struct seq_file *s, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 	static const char * const status_lookup[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 		[RPM_ACTIVE] = "active",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 		[RPM_RESUMING] = "resuming",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 		[RPM_SUSPENDED] = "suspended",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 		[RPM_SUSPENDING] = "suspending"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 	const char *p = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 	if (dev->power.runtime_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 		p = "error";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 	else if (dev->power.disable_depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 		p = "unsupported";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 		p = status_lookup[dev->power.runtime_status];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 		WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	seq_puts(s, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) static int genpd_summary_one(struct seq_file *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 			struct generic_pm_domain *genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 	static const char * const status_lookup[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 		[GENPD_STATE_ON] = "on",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 		[GENPD_STATE_OFF] = "off"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 	struct pm_domain_data *pm_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 	const char *kobj_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 	struct gpd_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 	char state[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 	ret = genpd_lock_interruptible(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 	if (!genpd_status_on(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 		snprintf(state, sizeof(state), "%s-%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 			 status_lookup[genpd->status], genpd->state_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 		snprintf(state, sizeof(state), "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 			 status_lookup[genpd->status]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 	seq_printf(s, "%-30s  %-15s ", genpd->name, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	 * Modifications on the list require holding locks on both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 	 * parent and child, so we are safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 	 * Also genpd->name is immutable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 		seq_printf(s, "%s", link->child->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 		if (!list_is_last(&link->parent_node, &genpd->parent_links))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 			seq_puts(s, ", ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 				genpd_is_irq_safe(genpd) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 				GFP_ATOMIC : GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 		if (kobj_path == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 		seq_printf(s, "\n    %-50s  ", kobj_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 		rtpm_status_str(s, pm_data->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 		kfree(kobj_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 	seq_puts(s, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) static int summary_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 	struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 	seq_puts(s, "domain                          status          children\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 	seq_puts(s, "    /device                                             runtime status\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 	seq_puts(s, "----------------------------------------------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 	ret = mutex_lock_interruptible(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 		ret = genpd_summary_one(s, genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 	mutex_unlock(&gpd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) static int status_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 	static const char * const status_lookup[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 		[GENPD_STATE_ON] = "on",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 		[GENPD_STATE_OFF] = "off"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 	struct generic_pm_domain *genpd = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 	ret = genpd_lock_interruptible(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 	if (genpd->status == GENPD_STATE_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 			genpd->state_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 		seq_printf(s, "%s\n", status_lookup[genpd->status]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) static int sub_domains_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 	struct generic_pm_domain *genpd = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 	struct gpd_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 	ret = genpd_lock_interruptible(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 	list_for_each_entry(link, &genpd->parent_links, parent_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 		seq_printf(s, "%s\n", link->child->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) static int idle_states_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 	struct generic_pm_domain *genpd = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 	ret = genpd_lock_interruptible(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 	for (i = 0; i < genpd->state_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 		ktime_t delta = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 		s64 msecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 		if ((genpd->status == GENPD_STATE_OFF) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 				(genpd->state_idx == i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 			delta = ktime_sub(ktime_get(), genpd->accounting_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 		msecs = ktime_to_ms(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 			ktime_add(genpd->states[i].idle_time, delta));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 		seq_printf(s, "S%-13i %-14lld %-14llu %llu\n", i, msecs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 			      genpd->states[i].usage, genpd->states[i].rejected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) static int active_time_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 	struct generic_pm_domain *genpd = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 	ktime_t delta = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 	ret = genpd_lock_interruptible(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 	if (genpd->status == GENPD_STATE_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 		delta = ktime_sub(ktime_get(), genpd->accounting_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 	seq_printf(s, "%lld ms\n", ktime_to_ms(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 				ktime_add(genpd->on_time, delta)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) static int total_idle_time_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 	struct generic_pm_domain *genpd = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 	ktime_t delta = 0, total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 	ret = genpd_lock_interruptible(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 	for (i = 0; i < genpd->state_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 		if ((genpd->status == GENPD_STATE_OFF) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 				(genpd->state_idx == i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 			delta = ktime_sub(ktime_get(), genpd->accounting_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		total = ktime_add(total, genpd->states[i].idle_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	total = ktime_add(total, delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 	seq_printf(s, "%lld ms\n", ktime_to_ms(total));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) static int devices_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 	struct generic_pm_domain *genpd = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 	struct pm_domain_data *pm_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 	const char *kobj_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 	ret = genpd_lock_interruptible(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 				genpd_is_irq_safe(genpd) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 				GFP_ATOMIC : GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 		if (kobj_path == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 		seq_printf(s, "%s\n", kobj_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 		kfree(kobj_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) static int perf_state_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 	struct generic_pm_domain *genpd = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 	if (genpd_lock_interruptible(genpd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 	seq_printf(s, "%u\n", genpd->performance_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 	genpd_unlock(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 
/*
 * Generate <name>_fops (file_operations wired to single_open() around each
 * <name>_show() above) for use with debugfs_create_file() below.
 */
DEFINE_SHOW_ATTRIBUTE(summary);
DEFINE_SHOW_ATTRIBUTE(status);
DEFINE_SHOW_ATTRIBUTE(sub_domains);
DEFINE_SHOW_ATTRIBUTE(idle_states);
DEFINE_SHOW_ATTRIBUTE(active_time);
DEFINE_SHOW_ATTRIBUTE(total_idle_time);
DEFINE_SHOW_ATTRIBUTE(devices);
DEFINE_SHOW_ATTRIBUTE(perf_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) static void genpd_debug_add(struct generic_pm_domain *genpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 	struct dentry *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 	if (!genpd_debugfs_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 	debugfs_create_file("current_state", 0444,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 			    d, genpd, &status_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 	debugfs_create_file("sub_domains", 0444,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 			    d, genpd, &sub_domains_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 	debugfs_create_file("idle_states", 0444,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 			    d, genpd, &idle_states_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 	debugfs_create_file("active_time", 0444,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 			    d, genpd, &active_time_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	debugfs_create_file("total_idle_time", 0444,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 			    d, genpd, &total_idle_time_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 	debugfs_create_file("devices", 0444,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 			    d, genpd, &devices_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 	if (genpd->set_performance_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 		debugfs_create_file("perf_state", 0444,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 				    d, genpd, &perf_state_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) static int __init genpd_debug_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 	struct generic_pm_domain *genpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 			    NULL, &summary_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 		genpd_debug_add(genpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) late_initcall(genpd_debug_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 
/* Tear down the whole "pm_genpd" debugfs tree, including per-domain dirs. */
static void __exit genpd_debug_exit(void)
{
	debugfs_remove_recursive(genpd_debugfs_dir);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) #endif /* CONFIG_DEBUG_FS */