^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/clk-provider.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/clk/clk-conf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/clkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include "clk.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) static DEFINE_SPINLOCK(enable_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) static DEFINE_MUTEX(prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) static struct task_struct *prepare_owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) static struct task_struct *enable_owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) static int prepare_refcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) static int enable_refcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) static HLIST_HEAD(clk_root_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) static HLIST_HEAD(clk_orphan_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) static LIST_HEAD(clk_notifier_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) static struct hlist_head *all_lists[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) &clk_root_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) &clk_orphan_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) /*** private data structures ***/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) struct clk_parent_map {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) const struct clk_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) struct clk_core *core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) const char *fw_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) struct clk_core {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) const struct clk_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) struct clk_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) struct module *owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) struct device_node *of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) struct clk_core *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) struct clk_parent_map *parents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) u8 num_parents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) u8 new_parent_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) unsigned long rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) unsigned long req_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) unsigned long new_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) struct clk_core *new_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) struct clk_core *new_child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) bool orphan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) bool rpm_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) bool need_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) bool boot_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) unsigned int enable_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) unsigned int prepare_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) unsigned int protect_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) unsigned long min_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) unsigned long max_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) unsigned long accuracy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) int phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) struct clk_duty duty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) struct hlist_head children;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) struct hlist_node child_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) struct hlist_head clks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) unsigned int notifier_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) struct dentry *dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) struct hlist_node debug_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) struct kref ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #define CREATE_TRACE_POINTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #include <trace/events/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) struct clk {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) struct clk_core *core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) const char *dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) const char *con_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) unsigned long min_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) unsigned long max_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) unsigned int exclusive_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) struct hlist_node clks_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) /*** runtime pm ***/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) static int clk_pm_runtime_get(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) if (!core->rpm_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) ret = pm_runtime_get_sync(core->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) pm_runtime_put_noidle(core->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) static void clk_pm_runtime_put(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) if (!core->rpm_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) pm_runtime_put_sync(core->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) /*** locking ***/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) static void clk_prepare_lock(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) if (!mutex_trylock(&prepare_lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) if (prepare_owner == current) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) prepare_refcnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) mutex_lock(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) WARN_ON_ONCE(prepare_owner != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) WARN_ON_ONCE(prepare_refcnt != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) prepare_owner = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) prepare_refcnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) static void clk_prepare_unlock(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) WARN_ON_ONCE(prepare_owner != current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) WARN_ON_ONCE(prepare_refcnt == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) if (--prepare_refcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) prepare_owner = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) mutex_unlock(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) static unsigned long clk_enable_lock(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) __acquires(enable_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) * On UP systems, spin_trylock_irqsave() always returns true, even if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) * we already hold the lock. So, in that case, we rely only on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) * reference counting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) if (!IS_ENABLED(CONFIG_SMP) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) !spin_trylock_irqsave(&enable_lock, flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) if (enable_owner == current) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) enable_refcnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) __acquire(enable_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) if (!IS_ENABLED(CONFIG_SMP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) local_save_flags(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) return flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) spin_lock_irqsave(&enable_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) WARN_ON_ONCE(enable_owner != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) WARN_ON_ONCE(enable_refcnt != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) enable_owner = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) enable_refcnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) return flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) static void clk_enable_unlock(unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) __releases(enable_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) WARN_ON_ONCE(enable_owner != current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) WARN_ON_ONCE(enable_refcnt == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) if (--enable_refcnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) __release(enable_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) enable_owner = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) spin_unlock_irqrestore(&enable_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) static bool clk_core_rate_is_protected(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) return core->protect_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) static bool clk_core_is_prepared(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * .is_prepared is optional for clocks that can prepare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) * fall back to software usage counter if it is missing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) if (!core->ops->is_prepared)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) return core->prepare_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) if (!clk_pm_runtime_get(core)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) ret = core->ops->is_prepared(core->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) clk_pm_runtime_put(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) static bool clk_core_is_enabled(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) * .is_enabled is only mandatory for clocks that gate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) * fall back to software usage counter if .is_enabled is missing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) if (!core->ops->is_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) return core->enable_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) * Check if clock controller's device is runtime active before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) * calling .is_enabled callback. If not, assume that clock is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) * disabled, because we might be called from atomic context, from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) * which pm_runtime_get() is not allowed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) * This function is called mainly from clk_disable_unused_subtree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) * which ensures proper runtime pm activation of controller before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) * taking enable spinlock, but the below check is needed if one tries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) * to call it from other places.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) if (core->rpm_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) pm_runtime_get_noresume(core->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) if (!pm_runtime_active(core->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) ret = core->ops->is_enabled(core->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) if (core->rpm_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) pm_runtime_put(core->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) /*** helper functions ***/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) const char *__clk_get_name(const struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) return !clk ? NULL : clk->core->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) EXPORT_SYMBOL_GPL(__clk_get_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) const char *clk_hw_get_name(const struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) return hw->core->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) EXPORT_SYMBOL_GPL(clk_hw_get_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) struct clk_hw *__clk_get_hw(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) return !clk ? NULL : clk->core->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) EXPORT_SYMBOL_GPL(__clk_get_hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) return hw->core->num_parents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) return hw->core->parent ? hw->core->parent->hw : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) EXPORT_SYMBOL_GPL(clk_hw_get_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) static struct clk_core *__clk_lookup_subtree(const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) struct clk_core *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) if (!strcmp(core->name, name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) return core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) hlist_for_each_entry(child, &core->children, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) ret = __clk_lookup_subtree(name, child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) static struct clk_core *clk_core_lookup(const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) struct clk_core *root_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) struct clk_core *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) /* search the 'proper' clk tree first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) ret = __clk_lookup_subtree(name, root_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) /* if not found, then search the orphan tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) ret = __clk_lookup_subtree(name, root_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) #ifdef CONFIG_OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) static int of_parse_clkspec(const struct device_node *np, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) const char *name, struct of_phandle_args *out_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) static struct clk_hw *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) static inline int of_parse_clkspec(const struct device_node *np, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) struct of_phandle_args *out_args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) static inline struct clk_hw *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) * clk_core_get - Find the clk_core parent of a clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) * @core: clk to find parent of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) * @p_index: parent index to search for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) * This is the preferred method for clk providers to find the parent of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) * clk when that parent is external to the clk controller. The parent_names
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) * array is indexed and treated as a local name matching a string in the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) * node's 'clock-names' property or as the 'con_id' matching the device's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) * dev_name() in a clk_lookup. This allows clk providers to use their own
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) * namespace instead of looking for a globally unique parent string.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) * For example the following DT snippet would allow a clock registered by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) * clock-controller@c001 that has a clk_init_data::parent_data array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) * with 'xtal' in the 'name' member to find the clock provided by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) * clock-controller@f00abcd without needing to get the globally unique name of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) * the xtal clk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) * parent: clock-controller@f00abcd {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) * reg = <0xf00abcd 0xabcd>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) * #clock-cells = <0>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) * };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) * clock-controller@c001 {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) * reg = <0xc001 0xf00d>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) * clocks = <&parent>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) * clock-names = "xtal";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) * #clock-cells = <1>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) * };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) * Returns: -ENOENT when the provider can't be found or the clk doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) * exist in the provider or the name can't be found in the DT node or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) * in a clkdev lookup. NULL when the provider knows about the clk but it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) * isn't provided on this system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) * A valid clk_core pointer when the clk can be found in the provider.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) const char *name = core->parents[p_index].fw_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) int index = core->parents[p_index].index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) struct clk_hw *hw = ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) struct device *dev = core->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) const char *dev_id = dev ? dev_name(dev) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) struct device_node *np = core->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) struct of_phandle_args clkspec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) if (np && (name || index >= 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) !of_parse_clkspec(np, index, name, &clkspec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) hw = of_clk_get_hw_from_clkspec(&clkspec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) of_node_put(clkspec.np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) } else if (name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) * If the DT search above couldn't find the provider fallback to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) * looking up via clkdev based clk_lookups.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) hw = clk_find_hw(dev_id, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) if (IS_ERR(hw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) return ERR_CAST(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) return hw->core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) struct clk_parent_map *entry = &core->parents[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) struct clk_core *parent = ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) if (entry->hw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) parent = entry->hw->core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) * We have a direct reference but it isn't registered yet?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) * Orphan it and let clk_reparent() update the orphan status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) * when the parent is registered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) if (!parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) parent = ERR_PTR(-EPROBE_DEFER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) parent = clk_core_get(core, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) if (PTR_ERR(parent) == -ENOENT && entry->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) parent = clk_core_lookup(entry->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) /* Only cache it if it's not an error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) if (!IS_ERR(parent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) entry->core = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) u8 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) if (!core || index >= core->num_parents || !core->parents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) if (!core->parents[index].core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) clk_core_fill_parent_index(core, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) return core->parents[index].core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) struct clk_hw *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) struct clk_core *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) parent = clk_core_get_parent_by_index(hw->core, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) return !parent ? NULL : parent->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) unsigned int __clk_get_enable_count(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) return !clk ? 0 : clk->core->enable_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) if (!core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) if (!core->num_parents || core->parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) return core->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) * Clk must have a parent because num_parents > 0 but the parent isn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) * known yet. Best to return 0 as the rate of this clk until we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) * properly recalc the rate based on the parent's rate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) unsigned long clk_hw_get_rate(const struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) return clk_core_get_rate_nolock(hw->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) EXPORT_SYMBOL_GPL(clk_hw_get_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) if (!core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) return core->accuracy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) unsigned long clk_hw_get_flags(const struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) return hw->core->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) EXPORT_SYMBOL_GPL(clk_hw_get_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) bool clk_hw_is_prepared(const struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) return clk_core_is_prepared(hw->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) bool clk_hw_rate_is_protected(const struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) return clk_core_rate_is_protected(hw->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) bool clk_hw_is_enabled(const struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) return clk_core_is_enabled(hw->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) EXPORT_SYMBOL_GPL(clk_hw_is_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) bool __clk_is_enabled(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) return clk_core_is_enabled(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) EXPORT_SYMBOL_GPL(__clk_is_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) static bool mux_is_better_rate(unsigned long rate, unsigned long now,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) unsigned long best, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) if (flags & CLK_MUX_ROUND_CLOSEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) return abs(now - rate) < abs(best - rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) return now <= rate && now > best;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) int clk_mux_determine_rate_flags(struct clk_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) struct clk_rate_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) struct clk_core *core = hw->core, *parent, *best_parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) int i, num_parents, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) unsigned long best = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) struct clk_rate_request parent_req = *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) /* if NO_REPARENT flag set, pass through to current parent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) if (core->flags & CLK_SET_RATE_NO_REPARENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) parent = core->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) if (core->flags & CLK_SET_RATE_PARENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) ret = __clk_determine_rate(parent ? parent->hw : NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) &parent_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) best = parent_req.rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) } else if (parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) best = clk_core_get_rate_nolock(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) best = clk_core_get_rate_nolock(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) /* find the parent that can provide the fastest rate <= rate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) num_parents = core->num_parents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) for (i = 0; i < num_parents; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) parent = clk_core_get_parent_by_index(core, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (!parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) if (core->flags & CLK_SET_RATE_PARENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) parent_req = *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) ret = __clk_determine_rate(parent->hw, &parent_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) parent_req.rate = clk_core_get_rate_nolock(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) if (mux_is_better_rate(req->rate, parent_req.rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) best, flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) best_parent = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) best = parent_req.rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) if (!best_parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) if (best_parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) req->best_parent_hw = best_parent->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) req->best_parent_rate = best;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) req->rate = best;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) struct clk *__clk_lookup(const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) struct clk_core *core = clk_core_lookup(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) return !core ? NULL : core->hw->clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) static void clk_core_get_boundaries(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) unsigned long *min_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) unsigned long *max_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) struct clk *clk_user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) *min_rate = core->min_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) *max_rate = core->max_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) hlist_for_each_entry(clk_user, &core->clks, clks_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) *min_rate = max(*min_rate, clk_user->min_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) hlist_for_each_entry(clk_user, &core->clks, clks_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) *max_rate = min(*max_rate, clk_user->max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) unsigned long max_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) hw->core->min_rate = min_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) hw->core->max_rate = max_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) * @hw: mux type clk to determine rate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) * @req: rate request, also used to return preferred parent and frequencies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) * Helper for finding best parent to provide a given frequency. This can be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) * directly as a determine_rate callback (e.g. for a mux), or from a more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) * complex clock that may combine a mux with other operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) * Returns: 0 on success, -EERROR value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) int __clk_mux_determine_rate(struct clk_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) struct clk_rate_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) return clk_mux_determine_rate_flags(hw, req, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) int __clk_mux_determine_rate_closest(struct clk_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) struct clk_rate_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) /*** clk api ***/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) static void clk_core_rate_unprotect(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) if (!core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) if (WARN(core->protect_count == 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) "%s already unprotected\n", core->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) if (--core->protect_count > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) clk_core_rate_unprotect(core->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) static int clk_core_rate_nuke_protect(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) if (!core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) if (core->protect_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) ret = core->protect_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) core->protect_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) clk_core_rate_unprotect(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) * clk_rate_exclusive_put - release exclusivity over clock rate control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) * @clk: the clk over which the exclusivity is released
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) * clk_rate_exclusive_put() completes a critical section during which a clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) * consumer cannot tolerate any other consumer making any operation on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) * clock which could result in a rate change or rate glitch. Exclusive clocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) * cannot have their rate changed, either directly or indirectly due to changes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * further up the parent chain of clocks. As a result, clocks up parent chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * also get under exclusive control of the calling consumer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * If exlusivity is claimed more than once on clock, even by the same consumer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) * the rate effectively gets locked as exclusivity can't be preempted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * Calls to clk_rate_exclusive_put() must be balanced with calls to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * error status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) void clk_rate_exclusive_put(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) * if there is something wrong with this consumer protect count, stop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * here before messing with the provider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) if (WARN_ON(clk->exclusive_count <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) clk_core_rate_unprotect(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) clk->exclusive_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) static void clk_core_rate_protect(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) if (!core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if (core->protect_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) clk_core_rate_protect(core->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) core->protect_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) static void clk_core_rate_restore_protect(struct clk_core *core, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (!core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) clk_core_rate_protect(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) core->protect_count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * clk_rate_exclusive_get - get exclusivity over the clk rate control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * @clk: the clk over which the exclusity of rate control is requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * clk_rate_exclusive_get() begins a critical section during which a clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * consumer cannot tolerate any other consumer making any operation on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * clock which could result in a rate change or rate glitch. Exclusive clocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * cannot have their rate changed, either directly or indirectly due to changes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * further up the parent chain of clocks. As a result, clocks up parent chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * also get under exclusive control of the calling consumer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * If exlusivity is claimed more than once on clock, even by the same consumer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * the rate effectively gets locked as exclusivity can't be preempted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * Calls to clk_rate_exclusive_get() should be balanced with calls to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * clk_rate_exclusive_put(). Calls to this function may sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * Returns 0 on success, -EERROR otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) int clk_rate_exclusive_get(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) clk_core_rate_protect(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) clk->exclusive_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) static void clk_core_unprepare(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (!core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (WARN(core->prepare_count == 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) "%s already unprepared\n", core->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) "Unpreparing critical %s\n", core->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (core->flags & CLK_SET_RATE_GATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) clk_core_rate_unprotect(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (--core->prepare_count > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) trace_clk_unprepare(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (core->ops->unprepare)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) core->ops->unprepare(core->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) clk_pm_runtime_put(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) trace_clk_unprepare_complete(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) clk_core_unprepare(core->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) static void clk_core_unprepare_lock(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) clk_core_unprepare(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * clk_unprepare - undo preparation of a clock source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * @clk: the clk being unprepared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * clk_unprepare may sleep, which differentiates it from clk_disable. In a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * if the operation may sleep. One example is a clk which is accessed over
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * I2c. In the complex case a clk gate operation may require a fast and a slow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * part. It is this reason that clk_unprepare and clk_disable are not mutually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * exclusive. In fact clk_disable must be called before clk_unprepare.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) void clk_unprepare(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (IS_ERR_OR_NULL(clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) clk_core_unprepare_lock(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) EXPORT_SYMBOL_GPL(clk_unprepare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) static int clk_core_prepare(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (!core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (core->prepare_count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) ret = clk_pm_runtime_get(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) ret = clk_core_prepare(core->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) goto runtime_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) trace_clk_prepare(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (core->ops->prepare)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) ret = core->ops->prepare(core->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) trace_clk_prepare_complete(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) goto unprepare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) core->prepare_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * CLK_SET_RATE_GATE is a special case of clock protection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * Instead of a consumer claiming exclusive rate control, it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * actually the provider which prevents any consumer from making any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * operation which could result in a rate change or rate glitch while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * the clock is prepared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (core->flags & CLK_SET_RATE_GATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) clk_core_rate_protect(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) unprepare:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) clk_core_unprepare(core->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) runtime_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) clk_pm_runtime_put(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static int clk_core_prepare_lock(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) ret = clk_core_prepare(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * clk_prepare - prepare a clock source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * @clk: the clk being prepared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * operation may sleep. One example is a clk which is accessed over I2c. In
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * the complex case a clk ungate operation may require a fast and a slow part.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * It is this reason that clk_prepare and clk_enable are not mutually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * exclusive. In fact clk_prepare must be called before clk_enable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * Returns 0 on success, -EERROR otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) int clk_prepare(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return clk_core_prepare_lock(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) EXPORT_SYMBOL_GPL(clk_prepare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static void clk_core_disable(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) lockdep_assert_held(&enable_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (!core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) "Disabling critical %s\n", core->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (--core->enable_count > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) trace_clk_disable_rcuidle(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (core->ops->disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) core->ops->disable(core->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) trace_clk_disable_complete_rcuidle(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) clk_core_disable(core->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) static void clk_core_disable_lock(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) flags = clk_enable_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) clk_core_disable(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) clk_enable_unlock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * clk_disable - gate a clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * @clk: the clk being gated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * clk_disable must not sleep, which differentiates it from clk_unprepare. In
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * a simple case, clk_disable can be used instead of clk_unprepare to gate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * clk if the operation is fast and will never sleep. One example is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * SoC-internal clk which is controlled via simple register writes. In the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * complex case a clk gate operation may require a fast and a slow part. It is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * this reason that clk_unprepare and clk_disable are not mutually exclusive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * In fact clk_disable must be called before clk_unprepare.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) void clk_disable(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (IS_ERR_OR_NULL(clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) clk_core_disable_lock(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) EXPORT_SYMBOL_GPL(clk_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) static int clk_core_enable(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) lockdep_assert_held(&enable_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (!core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (WARN(core->prepare_count == 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) "Enabling unprepared %s\n", core->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (core->enable_count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) ret = clk_core_enable(core->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) trace_clk_enable_rcuidle(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (core->ops->enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) ret = core->ops->enable(core->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) trace_clk_enable_complete_rcuidle(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) clk_core_disable(core->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) core->enable_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) static int clk_core_enable_lock(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) flags = clk_enable_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) ret = clk_core_enable(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) clk_enable_unlock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * clk_gate_restore_context - restore context for poweroff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * @hw: the clk_hw pointer of clock whose state is to be restored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * The clock gate restore context function enables or disables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) * the gate clocks based on the enable_count. This is done in cases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * where the clock context is lost and based on the enable_count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * the clock either needs to be enabled/disabled. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * helps restore the state of gate clocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) void clk_gate_restore_context(struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) struct clk_core *core = hw->core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (core->enable_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) core->ops->enable(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) core->ops->disable(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) EXPORT_SYMBOL_GPL(clk_gate_restore_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static int clk_core_save_context(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) hlist_for_each_entry(child, &core->children, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) ret = clk_core_save_context(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (core->ops && core->ops->save_context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) ret = core->ops->save_context(core->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) static void clk_core_restore_context(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (core->ops && core->ops->restore_context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) core->ops->restore_context(core->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) hlist_for_each_entry(child, &core->children, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) clk_core_restore_context(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * clk_save_context - save clock context for poweroff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * Saves the context of the clock register for powerstates in which the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * contents of the registers will be lost. Occurs deep within the suspend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * code. Returns 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) int clk_save_context(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) struct clk_core *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) hlist_for_each_entry(clk, &clk_root_list, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) ret = clk_core_save_context(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) ret = clk_core_save_context(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) EXPORT_SYMBOL_GPL(clk_save_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * clk_restore_context - restore clock context after poweroff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * Restore the saved clock context upon resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) void clk_restore_context(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) struct clk_core *core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) hlist_for_each_entry(core, &clk_root_list, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) clk_core_restore_context(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) hlist_for_each_entry(core, &clk_orphan_list, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) clk_core_restore_context(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) EXPORT_SYMBOL_GPL(clk_restore_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * clk_enable - ungate a clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * @clk: the clk being ungated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * clk_enable must not sleep, which differentiates it from clk_prepare. In a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * if the operation will never sleep. One example is a SoC-internal clk which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * is controlled via simple register writes. In the complex case a clk ungate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * operation may require a fast and a slow part. It is this reason that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * must be called before clk_enable. Returns 0 on success, -EERROR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) int clk_enable(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return clk_core_enable_lock(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) EXPORT_SYMBOL_GPL(clk_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static int clk_core_prepare_enable(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) ret = clk_core_prepare_lock(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) ret = clk_core_enable_lock(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) clk_core_unprepare_lock(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) static void clk_core_disable_unprepare(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) clk_core_disable_lock(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) clk_core_unprepare_lock(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static void __init clk_unprepare_unused_subtree(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) hlist_for_each_entry(child, &core->children, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) clk_unprepare_unused_subtree(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (dev_has_sync_state(core->dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) !(core->flags & CLK_DONT_HOLD_STATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (core->prepare_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (core->flags & CLK_IGNORE_UNUSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (clk_pm_runtime_get(core))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (clk_core_is_prepared(core)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) trace_clk_unprepare(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (core->ops->unprepare_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) core->ops->unprepare_unused(core->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) else if (core->ops->unprepare)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) core->ops->unprepare(core->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) trace_clk_unprepare_complete(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) clk_pm_runtime_put(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) static void __init clk_disable_unused_subtree(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) hlist_for_each_entry(child, &core->children, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) clk_disable_unused_subtree(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (dev_has_sync_state(core->dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) !(core->flags & CLK_DONT_HOLD_STATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (core->flags & CLK_OPS_PARENT_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) clk_core_prepare_enable(core->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (clk_pm_runtime_get(core))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) goto unprepare_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) flags = clk_enable_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (core->enable_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (core->flags & CLK_IGNORE_UNUSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) * some gate clocks have special needs during the disable-unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * sequence. call .disable_unused if available, otherwise fall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * back to .disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (clk_core_is_enabled(core)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) trace_clk_disable(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (core->ops->disable_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) core->ops->disable_unused(core->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) else if (core->ops->disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) core->ops->disable(core->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) trace_clk_disable_complete(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) unlock_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) clk_enable_unlock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) clk_pm_runtime_put(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) unprepare_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (core->flags & CLK_OPS_PARENT_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) clk_core_disable_unprepare(core->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) static bool clk_ignore_unused __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) static int __init clk_ignore_unused_setup(char *__unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) clk_ignore_unused = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) __setup("clk_ignore_unused", clk_ignore_unused_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) static int __init clk_disable_unused(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) struct clk_core *core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (clk_ignore_unused) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) pr_warn("clk: Not disabling unused clocks\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) hlist_for_each_entry(core, &clk_root_list, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) clk_disable_unused_subtree(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) hlist_for_each_entry(core, &clk_orphan_list, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) clk_disable_unused_subtree(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) hlist_for_each_entry(core, &clk_root_list, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) clk_unprepare_unused_subtree(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) hlist_for_each_entry(core, &clk_orphan_list, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) clk_unprepare_unused_subtree(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) late_initcall_sync(clk_disable_unused);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) static void clk_unprepare_disable_dev_subtree(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) hlist_for_each_entry(child, &core->children, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) clk_unprepare_disable_dev_subtree(child, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (core->dev != dev || !core->need_sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) clk_core_disable_unprepare(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) void clk_sync_state(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) struct clk_core *core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) hlist_for_each_entry(core, &clk_root_list, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) clk_unprepare_disable_dev_subtree(core, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) hlist_for_each_entry(core, &clk_orphan_list, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) clk_unprepare_disable_dev_subtree(core, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) EXPORT_SYMBOL_GPL(clk_sync_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) static int clk_core_determine_round_nolock(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) struct clk_rate_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) long rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (!core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * At this point, core protection will be disabled if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * - if the provider is not protected at all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) * - if the calling consumer is the only one which has exclusivity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) * over the provider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (clk_core_rate_is_protected(core)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) req->rate = core->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) } else if (core->ops->determine_rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) return core->ops->determine_rate(core->hw, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) } else if (core->ops->round_rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) rate = core->ops->round_rate(core->hw, req->rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) &req->best_parent_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (rate < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) return rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) req->rate = rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) static void clk_core_init_rate_req(struct clk_core * const core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) struct clk_rate_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) struct clk_core *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (WARN_ON(!core || !req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) parent = core->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) req->best_parent_hw = parent->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) req->best_parent_rate = parent->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) req->best_parent_hw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) req->best_parent_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) static bool clk_core_can_round(struct clk_core * const core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) return core->ops->determine_rate || core->ops->round_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static int clk_core_round_rate_nolock(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct clk_rate_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (!core) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) req->rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) clk_core_init_rate_req(core, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (clk_core_can_round(core))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) return clk_core_determine_round_nolock(core, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) else if (core->flags & CLK_SET_RATE_PARENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) return clk_core_round_rate_nolock(core->parent, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) req->rate = core->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) * __clk_determine_rate - get the closest rate actually supported by a clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * @hw: determine the rate of this clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) * @req: target rate request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) * Useful for clk_ops such as .set_rate and .determine_rate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) if (!hw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) req->rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) return clk_core_round_rate_nolock(hw->core, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) EXPORT_SYMBOL_GPL(__clk_determine_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) * clk_hw_round_rate() - round the given rate for a hw clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * @hw: the hw clk for which we are rounding a rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * @rate: the rate which is to be rounded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * Takes in a rate as input and rounds it to a rate that the clk can actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) * use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) * Context: prepare_lock must be held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) * For clk providers to call from within clk_ops such as .round_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) * .determine_rate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * Return: returns rounded rate of hw clk if clk supports round_rate operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * else returns the parent rate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) struct clk_rate_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) req.rate = rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) ret = clk_core_round_rate_nolock(hw->core, &req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return req.rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) EXPORT_SYMBOL_GPL(clk_hw_round_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) * clk_round_rate - round the given rate for a clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) * @clk: the clk for which we are rounding a rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) * @rate: the rate which is to be rounded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) * Takes in a rate as input and rounds it to a rate that the clk can actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * use which is then returned. If clk doesn't support round_rate operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) * then the parent rate is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) long clk_round_rate(struct clk *clk, unsigned long rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) struct clk_rate_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (clk->exclusive_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) clk_core_rate_unprotect(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) req.rate = rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) ret = clk_core_round_rate_nolock(clk->core, &req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (clk->exclusive_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) clk_core_rate_protect(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return req.rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) EXPORT_SYMBOL_GPL(clk_round_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * __clk_notify - call clk notifier chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * @core: clk that is changing rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) * @msg: clk notifier type (see include/linux/clk.h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) * @old_rate: old clk rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) * @new_rate: new clk rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * Triggers a notifier call chain on the clk rate-change notification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) * for 'clk'. Passes a pointer to the struct clk and the previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) * and current rates to the notifier callback. Intended to be called by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) * internal clock code only. Returns NOTIFY_DONE from the last driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * a driver returns that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static int __clk_notify(struct clk_core *core, unsigned long msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) unsigned long old_rate, unsigned long new_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct clk_notifier *cn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) struct clk_notifier_data cnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) int ret = NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) cnd.old_rate = old_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) cnd.new_rate = new_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) list_for_each_entry(cn, &clk_notifier_list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (cn->clk->core == core) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) cnd.clk = cn->clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) &cnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) if (ret & NOTIFY_STOP_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * __clk_recalc_accuracies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) * @core: first clk in the subtree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) * Walks the subtree of clks starting with clk and recalculates accuracies as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) * it goes. Note that if a clk does not implement the .recalc_accuracy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) * callback then it is assumed that the clock will take on the accuracy of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * parent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) static void __clk_recalc_accuracies(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) unsigned long parent_accuracy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (core->parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) parent_accuracy = core->parent->accuracy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (core->ops->recalc_accuracy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) core->accuracy = core->ops->recalc_accuracy(core->hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) parent_accuracy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) core->accuracy = parent_accuracy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) hlist_for_each_entry(child, &core->children, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) __clk_recalc_accuracies(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) static long clk_core_get_accuracy_recalc(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) __clk_recalc_accuracies(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) return clk_core_get_accuracy_no_lock(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) * clk_get_accuracy - return the accuracy of clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) * @clk: the clk whose accuracy is being returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) * Simply returns the cached accuracy of the clk, unless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) * issued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) * If clk is NULL then returns 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) long clk_get_accuracy(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) long accuracy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) accuracy = clk_core_get_accuracy_recalc(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) return accuracy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) EXPORT_SYMBOL_GPL(clk_get_accuracy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) static unsigned long clk_recalc(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) unsigned long parent_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) unsigned long rate = parent_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) rate = core->ops->recalc_rate(core->hw, parent_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) clk_pm_runtime_put(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) * __clk_recalc_rates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) * @core: first clk in the subtree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) * @msg: notification type (see include/linux/clk.h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) * Walks the subtree of clks starting with clk and recalculates rates as it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) * goes. Note that if a clk does not implement the .recalc_rate callback then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) * it is assumed that the clock will take on the rate of its parent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) * if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) unsigned long old_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) unsigned long parent_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) old_rate = core->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) if (core->parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) parent_rate = core->parent->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) core->rate = clk_recalc(core, parent_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) * & ABORT_RATE_CHANGE notifiers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (core->notifier_count && msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) __clk_notify(core, msg, old_rate, core->rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) hlist_for_each_entry(child, &core->children, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) __clk_recalc_rates(child, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (core && (core->flags & CLK_GET_RATE_NOCACHE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) __clk_recalc_rates(core, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) return clk_core_get_rate_nolock(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) * clk_get_rate - return the rate of clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) * @clk: the clk whose rate is being returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) * is set, which means a recalc_rate will be issued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) * If clk is NULL then returns 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) unsigned long clk_get_rate(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) unsigned long rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) rate = clk_core_get_rate_recalc(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) return rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) EXPORT_SYMBOL_GPL(clk_get_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) static int clk_fetch_parent_index(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) struct clk_core *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) if (!parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) for (i = 0; i < core->num_parents; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) /* Found it first try! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (core->parents[i].core == parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) /* Something else is here, so keep looking */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (core->parents[i].core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) /* Maybe core hasn't been cached but the hw is all we know? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (core->parents[i].hw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (core->parents[i].hw == parent->hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) /* Didn't match, but we're expecting a clk_hw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) /* Maybe it hasn't been cached (clk_set_parent() path) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) if (parent == clk_core_get(core, i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) /* Fallback to comparing globally unique names */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (core->parents[i].name &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) !strcmp(parent->name, core->parents[i].name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) if (i == core->num_parents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) core->parents[i].core = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) * clk_hw_get_parent_index - return the index of the parent clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) * @hw: clk_hw associated with the clk being consumed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * Fetches and returns the index of parent clock. Returns -EINVAL if the given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) * clock does not have a current parent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) int clk_hw_get_parent_index(struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) struct clk_hw *parent = clk_hw_get_parent(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) if (WARN_ON(parent == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) return clk_fetch_parent_index(hw->core, parent->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) static void clk_core_hold_state(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (core->need_sync || !core->boot_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (core->orphan || !dev_has_sync_state(core->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) if (core->flags & CLK_DONT_HOLD_STATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) core->need_sync = !clk_core_prepare_enable(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) static void __clk_core_update_orphan_hold_state(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if (core->orphan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) clk_core_hold_state(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) hlist_for_each_entry(child, &core->children, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) __clk_core_update_orphan_hold_state(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) * Update the orphan status of @core and all its children.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) core->orphan = is_orphan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) hlist_for_each_entry(child, &core->children, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) clk_core_update_orphan_status(child, is_orphan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) bool was_orphan = core->orphan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) hlist_del(&core->child_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if (new_parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) bool becomes_orphan = new_parent->orphan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) /* avoid duplicate POST_RATE_CHANGE notifications */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if (new_parent->new_child == core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) new_parent->new_child = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) hlist_add_head(&core->child_node, &new_parent->children);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (was_orphan != becomes_orphan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) clk_core_update_orphan_status(core, becomes_orphan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) hlist_add_head(&core->child_node, &clk_orphan_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) if (!was_orphan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) clk_core_update_orphan_status(core, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) core->parent = new_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) static struct clk_core *__clk_set_parent_before(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) struct clk_core *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) struct clk_core *old_parent = core->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) * 2. Migrate prepare state between parents and prevent race with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) * clk_enable().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) * If the clock is not prepared, then a race with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) * clk_enable/disable() is impossible since we already have the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) * prepare lock (future calls to clk_enable() need to be preceded by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) * a clk_prepare()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * If the clock is prepared, migrate the prepared state to the new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * parent and also protect against a race with clk_enable() by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) * forcing the clock and the new parent on. This ensures that all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) * future calls to clk_enable() are practically NOPs with respect to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) * hardware and software states.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) * See also: Comment for clk_set_parent() below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) if (core->flags & CLK_OPS_PARENT_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) clk_core_prepare_enable(old_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) clk_core_prepare_enable(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) /* migrate prepare count if > 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) if (core->prepare_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) clk_core_prepare_enable(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) clk_core_enable_lock(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) /* update the clk tree topology */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) flags = clk_enable_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) clk_reparent(core, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) clk_enable_unlock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) return old_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) static void __clk_set_parent_after(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) struct clk_core *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) struct clk_core *old_parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) * Finish the migration of prepare state and undo the changes done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) * for preventing a race with clk_enable().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) if (core->prepare_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) clk_core_disable_lock(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) clk_core_disable_unprepare(old_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) if (core->flags & CLK_OPS_PARENT_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) clk_core_disable_unprepare(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) clk_core_disable_unprepare(old_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) u8 p_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) struct clk_core *old_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) old_parent = __clk_set_parent_before(core, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) trace_clk_set_parent(core, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) /* change clock input source */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (parent && core->ops->set_parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) ret = core->ops->set_parent(core->hw, p_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) trace_clk_set_parent_complete(core, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) flags = clk_enable_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) clk_reparent(core, old_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) clk_enable_unlock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) __clk_set_parent_after(core, old_parent, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) __clk_set_parent_after(core, parent, old_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) * __clk_speculate_rates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) * @core: first clk in the subtree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) * @parent_rate: the "future" rate of clk's parent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) * Walks the subtree of clks starting with clk, speculating rates as it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) * goes and firing off PRE_RATE_CHANGE notifications as necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) * pre-rate change notifications and returns early if no clks in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) * subtree have subscribed to the notifications. Note that if a clk does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) * implement the .recalc_rate callback then it is assumed that the clock will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) * take on the rate of its parent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) static int __clk_speculate_rates(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) unsigned long parent_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) unsigned long new_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) int ret = NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) new_rate = clk_recalc(core, parent_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) if (core->notifier_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (ret & NOTIFY_STOP_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) __func__, core->name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) hlist_for_each_entry(child, &core->children, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) ret = __clk_speculate_rates(child, new_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if (ret & NOTIFY_STOP_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) struct clk_core *new_parent, u8 p_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) core->new_rate = new_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) core->new_parent = new_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) core->new_parent_index = p_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) /* include clk in new parent's PRE_RATE_CHANGE notifications */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) core->new_child = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) if (new_parent && new_parent != core->parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) new_parent->new_child = core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) hlist_for_each_entry(child, &core->children, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) child->new_rate = clk_recalc(child, new_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) clk_calc_subtree(child, child->new_rate, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) * calculate the new rates returning the topmost clock that has to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) * changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) static struct clk_core *clk_calc_new_rates(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) unsigned long rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) struct clk_core *top = core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) struct clk_core *old_parent, *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) unsigned long best_parent_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) unsigned long new_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) unsigned long min_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) unsigned long max_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) int p_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) /* sanity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) if (IS_ERR_OR_NULL(core))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) /* save parent rate, if it exists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) parent = old_parent = core->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) if (parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) best_parent_rate = parent->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) clk_core_get_boundaries(core, &min_rate, &max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) /* find the closest rate and parent clk/rate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) if (clk_core_can_round(core)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) struct clk_rate_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) req.rate = rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) req.min_rate = min_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) req.max_rate = max_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) clk_core_init_rate_req(core, &req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) ret = clk_core_determine_round_nolock(core, &req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) best_parent_rate = req.best_parent_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) new_rate = req.rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) if (new_rate < min_rate || new_rate > max_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) /* pass-through clock without adjustable parent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) core->new_rate = core->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) /* pass-through clock with adjustable parent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) top = clk_calc_new_rates(parent, rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) new_rate = parent->new_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) /* some clocks must be gated to change parent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) if (parent != old_parent &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) pr_debug("%s: %s not gated but wants to reparent\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) __func__, core->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) /* try finding the new parent index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) if (parent && core->num_parents > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) p_index = clk_fetch_parent_index(core, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) if (p_index < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) pr_debug("%s: clk %s can not be parent of clk %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) __func__, parent->name, core->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) best_parent_rate != parent->rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) top = clk_calc_new_rates(parent, best_parent_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) clk_calc_subtree(core, new_rate, parent, p_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) return top;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) * Notify about rate changes in a subtree. Always walk down the whole tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) * so that in case of an error we can walk down the whole tree again and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) * abort the change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) unsigned long event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) struct clk_core *child, *tmp_clk, *fail_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) int ret = NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) if (core->rate == core->new_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) if (core->notifier_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) ret = __clk_notify(core, event, core->rate, core->new_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) if (ret & NOTIFY_STOP_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) fail_clk = core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (core->ops->pre_rate_change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) ret = core->ops->pre_rate_change(core->hw, core->rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) core->new_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) fail_clk = core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) hlist_for_each_entry(child, &core->children, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) /* Skip children who will be reparented to another clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (child->new_parent && child->new_parent != core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) tmp_clk = clk_propagate_rate_change(child, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) if (tmp_clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) fail_clk = tmp_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) /* handle the new child who might not be in core->children yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) if (core->new_child) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) tmp_clk = clk_propagate_rate_change(core->new_child, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (tmp_clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) fail_clk = tmp_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) return fail_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) * walk down a subtree and set the new rates notifying the rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) * change on the way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) static void clk_change_rate(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) struct hlist_node *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) unsigned long old_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) unsigned long best_parent_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) bool skip_set_rate = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) struct clk_core *old_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) struct clk_core *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) old_rate = core->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) if (core->new_parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) parent = core->new_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) best_parent_rate = core->new_parent->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) } else if (core->parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) parent = core->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) best_parent_rate = core->parent->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) if (clk_pm_runtime_get(core))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) if (core->flags & CLK_SET_RATE_UNGATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) clk_core_prepare(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) flags = clk_enable_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) clk_core_enable(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) clk_enable_unlock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) if (core->new_parent && core->new_parent != core->parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) old_parent = __clk_set_parent_before(core, core->new_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) trace_clk_set_parent(core, core->new_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) if (core->ops->set_rate_and_parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) skip_set_rate = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) core->ops->set_rate_and_parent(core->hw, core->new_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) best_parent_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) core->new_parent_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) } else if (core->ops->set_parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) core->ops->set_parent(core->hw, core->new_parent_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) trace_clk_set_parent_complete(core, core->new_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) __clk_set_parent_after(core, core->new_parent, old_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) if (core->flags & CLK_OPS_PARENT_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) clk_core_prepare_enable(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) trace_clk_set_rate(core, core->new_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) if (!skip_set_rate && core->ops->set_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) trace_clk_set_rate_complete(core, core->new_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) core->rate = clk_recalc(core, best_parent_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) if (core->flags & CLK_SET_RATE_UNGATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) flags = clk_enable_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) clk_core_disable(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) clk_enable_unlock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) clk_core_unprepare(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) if (core->flags & CLK_OPS_PARENT_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) clk_core_disable_unprepare(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (core->notifier_count && old_rate != core->rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) if (core->flags & CLK_RECALC_NEW_RATES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) (void)clk_calc_new_rates(core, core->new_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) if (core->ops->post_rate_change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) core->ops->post_rate_change(core->hw, old_rate, core->rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) * Use safe iteration, as change_rate can actually swap parents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) * for certain clock types.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) /* Skip children who will be reparented to another clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) if (child->new_parent && child->new_parent != core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) clk_change_rate(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) /* handle the new child who might not be in core->children yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) if (core->new_child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) clk_change_rate(core->new_child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) clk_pm_runtime_put(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) unsigned long req_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) int ret, cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) struct clk_rate_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) if (!core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) /* simulate what the rate would be if it could be freely set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) cnt = clk_core_rate_nuke_protect(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (cnt < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) req.rate = req_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) ret = clk_core_round_rate_nolock(core, &req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) /* restore the protection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) clk_core_rate_restore_protect(core, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) return ret ? 0 : req.rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) static int clk_core_set_rate_nolock(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) unsigned long req_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) struct clk_core *top, *fail_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) unsigned long rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) if (!core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) rate = clk_core_req_round_rate_nolock(core, req_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) /* bail early if nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) if (rate == clk_core_get_rate_nolock(core))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) /* fail on a direct rate set of a protected provider */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) if (clk_core_rate_is_protected(core))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) /* calculate new rates and get the topmost changed clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) top = clk_calc_new_rates(core, req_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) if (!top)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) ret = clk_pm_runtime_get(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) /* notify that we are about to change rates */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) if (fail_clk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) pr_debug("%s: failed to set %s rate\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) fail_clk->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) /* change the rates */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) clk_change_rate(top);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) core->req_rate = req_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) clk_pm_runtime_put(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) * clk_set_rate - specify a new rate for clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) * @clk: the clk whose rate is being changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) * @rate: the new rate for clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) * In the simplest case clk_set_rate will only adjust the rate of clk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) * propagate up to clk's parent; whether or not this happens depends on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) * after calling .round_rate then upstream parent propagation is ignored. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) * *parent_rate comes back with a new rate for clk's parent then we propagate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) * up to clk's parent and set its rate. Upward propagation will continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) * until either a clk does not support the CLK_SET_RATE_PARENT flag or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) * .round_rate stops requesting changes to clk's parent_rate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) * Rate changes are accomplished via tree traversal that also recalculates the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) * Returns 0 on success, -EERROR otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) int clk_set_rate(struct clk *clk, unsigned long rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) /* prevent racing with updates to the clock topology */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) if (clk->exclusive_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) clk_core_rate_unprotect(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) ret = clk_core_set_rate_nolock(clk->core, rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (clk->exclusive_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) clk_core_rate_protect(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) EXPORT_SYMBOL_GPL(clk_set_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) * clk_set_rate_exclusive - specify a new rate and get exclusive control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) * @clk: the clk whose rate is being changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) * @rate: the new rate for clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) * within a critical section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) * This can be used initially to ensure that at least 1 consumer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) * satisfied when several consumers are competing for exclusivity over the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) * same clock provider.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) * The exclusivity is not applied if setting the rate failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) * Calls to clk_rate_exclusive_get() should be balanced with calls to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) * clk_rate_exclusive_put().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) * Returns 0 on success, -EERROR otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) /* prevent racing with updates to the clock topology */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) * The temporary protection removal is not here, on purpose
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) * This function is meant to be used instead of clk_rate_protect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) * so before the consumer code path protect the clock provider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) ret = clk_core_set_rate_nolock(clk->core, rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) clk_core_rate_protect(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) clk->exclusive_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Updates this consumer's allowed rate range. If the clock's current rate
 * falls outside the new range, an attempt is made to re-rate the clock to
 * the nearest range boundary; should that fail, the previous range is
 * restored.
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
	int ret = 0;
	unsigned long old_min, old_max, rate;

	if (!clk)
		return 0;

	/* a range with min above max can never be satisfied */
	if (min > max) {
		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
			__func__, clk->core->name, clk->dev_id, clk->con_id,
			min, max);
		return -EINVAL;
	}

	clk_prepare_lock();

	/* temporarily lift this consumer's rate protection, restored below */
	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	/* Save the current values in case we need to rollback the change */
	old_min = clk->min_rate;
	old_max = clk->max_rate;
	clk->min_rate = min;
	clk->max_rate = max;

	rate = clk_core_get_rate_nolock(clk->core);
	if (rate < min || rate > max) {
		/*
		 * FIXME:
		 * We are in bit of trouble here, current rate is outside
		 * the requested range. We are going try to request appropriate
		 * range boundary but there is a catch. It may fail for the
		 * usual reason (clock broken, clock protected, etc) but also
		 * because:
		 * - round_rate() was not favorable and fell on the wrong
		 *   side of the boundary
		 * - the determine_rate() callback does not really check for
		 *   this corner case when determining the rate
		 */

		/* clamp to the nearest boundary of the new range */
		if (rate < min)
			rate = min;
		else
			rate = max;

		ret = clk_core_set_rate_nolock(clk->core, rate);
		if (ret) {
			/* rollback the changes */
			clk->min_rate = old_min;
			clk->max_rate = old_max;
		}
	}

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) * clk_set_min_rate - set a minimum clock rate for a clock source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) * @clk: clock source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) * @rate: desired minimum clock rate in Hz, inclusive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) * Returns success (0) or negative errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) int clk_set_min_rate(struct clk *clk, unsigned long rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) return clk_set_rate_range(clk, rate, clk->max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) EXPORT_SYMBOL_GPL(clk_set_min_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) * clk_set_max_rate - set a maximum clock rate for a clock source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) * @clk: clock source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) * @rate: desired maximum clock rate in Hz, inclusive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) * Returns success (0) or negative errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) int clk_set_max_rate(struct clk *clk, unsigned long rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) return clk_set_rate_range(clk, clk->min_rate, rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) EXPORT_SYMBOL_GPL(clk_set_max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) * clk_get_parent - return the parent of a clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) * @clk: the clk whose parent gets returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) * Simply returns clk->parent. Returns NULL if clk is NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) struct clk *clk_get_parent(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) struct clk *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) /* TODO: Create a per-user clk and change callers to call clk_put */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) return parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) EXPORT_SYMBOL_GPL(clk_get_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
/*
 * Resolve a clock's initial parent: multi-parent clocks with a
 * ->get_parent() callback ask the hardware which input is selected,
 * otherwise parent index 0 is used.
 */
static struct clk_core *__clk_init_parent(struct clk_core *core)
{
	u8 index = 0;

	if (core->num_parents > 1 && core->ops->get_parent)
		index = core->ops->get_parent(core->hw);

	return clk_core_get_parent_by_index(core, index);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514)
/*
 * Move @core under @new_parent in the clk tree, then recalculate the
 * subtree's accuracies and rates (the latter firing POST_RATE_CHANGE
 * notifications).
 */
static void clk_core_reparent(struct clk_core *core,
				  struct clk_core *new_parent)
{
	clk_reparent(core, new_parent);
	__clk_recalc_accuracies(core);
	__clk_recalc_rates(core, POST_RATE_CHANGE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) if (!hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) * clk_has_parent - check if a clock is a possible parent for another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) * @clk: clock source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) * @parent: parent clock source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) * This function can be used in drivers that need to check that a clock can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) * the parent of another without actually changing the parent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) * Returns true if @parent is a possible parent for @clk, false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) bool clk_has_parent(struct clk *clk, struct clk *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) struct clk_core *core, *parent_core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) /* NULL clocks should be nops, so return success if either is NULL. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) if (!clk || !parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) core = clk->core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) parent_core = parent->core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) /* Optimize for the case where the parent is already the parent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) if (core->parent == parent_core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) for (i = 0; i < core->num_parents; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) if (!strcmp(core->parents[i].name, parent_core->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) EXPORT_SYMBOL_GPL(clk_has_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
/*
 * Re-parent @core to @parent with the prepare_lock held.
 *
 * Validates that the switch is allowed (a set_parent op exists for
 * multi-parent clocks, CLK_SET_PARENT_GATE is honoured while prepared,
 * and the rate is not protected), sends PRE_RATE_CHANGE notifications,
 * performs the switch and finally recalculates rates and accuracies.
 * A NULL @parent is accepted (no parent-index lookup is performed).
 *
 * Returns 0 on success or a negative error code.
 */
static int clk_core_set_parent_nolock(struct clk_core *core,
				      struct clk_core *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/* nothing to do if the requested parent is already in place */
	if (core->parent == parent)
		return 0;

	/* verify ops for multi-parent clks */
	if (core->num_parents > 1 && !core->ops->set_parent)
		return -EPERM;

	/* check that we are allowed to re-parent if the clock is in use */
	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
		return -EBUSY;

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
					__func__, parent->name, core->name);
			return p_index;
		}
		p_rate = parent->rate;
	}

	ret = clk_pm_runtime_get(core);
	if (ret)
		return ret;

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(core, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto runtime_put;

	/* do the re-parent */
	ret = __clk_set_parent(core, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(core, POST_RATE_CHANGE);
		__clk_recalc_accuracies(core);
	}

runtime_put:
	clk_pm_runtime_put(core);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) return clk_core_set_parent_nolock(hw->core, parent->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) EXPORT_SYMBOL_GPL(clk_hw_set_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) * clk_set_parent - switch the parent of a mux clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) * @clk: the mux clk whose input we are switching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) * @parent: the new input to clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) * Re-parent clk to use parent as its new input source. If clk is in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) * prepared state, the clk will get enabled for the duration of this call. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) * that's not acceptable for a specific clk (Eg: the consumer can't handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) * that, the reparenting is glitchy in hardware, etc), use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) * After successfully changing clk's parent clk_set_parent will update the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) * clk topology, sysfs topology and propagate rate recalculation via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) * __clk_recalc_rates.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) * Returns 0 on success, -EERROR otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) int clk_set_parent(struct clk *clk, struct clk *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) if (clk->exclusive_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) clk_core_rate_unprotect(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) ret = clk_core_set_parent_nolock(clk->core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) parent ? parent->core : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) if (clk->exclusive_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) clk_core_rate_protect(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) EXPORT_SYMBOL_GPL(clk_set_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) if (!core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) if (clk_core_rate_is_protected(core))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) trace_clk_set_phase(core, degrees);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) if (core->ops->set_phase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) ret = core->ops->set_phase(core->hw, degrees);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) core->phase = degrees;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) trace_clk_set_phase_complete(core, degrees);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) * clk_set_phase - adjust the phase shift of a clock signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) * @clk: clock signal source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) * @degrees: number of degrees the signal is shifted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) * Shifts the phase of a clock signal by the specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) * degrees. Returns 0 on success, -EERROR otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) * This function makes no distinction about the input or reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) * signal that we adjust the clock signal phase against. For example
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) * phase locked-loop clock signal generators we may shift phase with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) * respect to feedback clock signal input, but for other cases the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) * clock phase may be shifted with respect to some other, unspecified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) * signal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) * Additionally the concept of phase shift does not propagate through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) * the clock tree hierarchy, which sets it apart from clock rates and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) * clock accuracy. A parent clock phase attribute does not have an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) * impact on the phase attribute of a child clock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) int clk_set_phase(struct clk *clk, int degrees)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) /* sanity check degrees */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) degrees %= 360;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) if (degrees < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) degrees += 360;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) if (clk->exclusive_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) clk_core_rate_unprotect(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) ret = clk_core_set_phase_nolock(clk->core, degrees);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) if (clk->exclusive_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) clk_core_rate_protect(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) EXPORT_SYMBOL_GPL(clk_set_phase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) static int clk_core_get_phase(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) if (!core->ops->get_phase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) /* Always try to update cached phase if possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) ret = core->ops->get_phase(core->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) core->phase = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) * clk_get_phase - return the phase shift of a clock signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) * @clk: clock signal source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) * Returns the phase shift of a clock node in degrees, otherwise returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) * -EERROR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) int clk_get_phase(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) ret = clk_core_get_phase(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) EXPORT_SYMBOL_GPL(clk_get_phase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) /* Assume a default value of 50% */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) core->duty.num = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) core->duty.den = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) struct clk_duty *duty = &core->duty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) if (!core->ops->get_duty_cycle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) return clk_core_update_duty_cycle_parent_nolock(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) ret = core->ops->get_duty_cycle(core->hw, duty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) goto reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) /* Don't trust the clock provider too much */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) if (duty->den == 0 || duty->num > duty->den) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) goto reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) clk_core_reset_duty_cycle_nolock(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) if (core->parent &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) core->flags & CLK_DUTY_CYCLE_PARENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) ret = clk_core_update_duty_cycle_nolock(core->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) clk_core_reset_duty_cycle_nolock(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) struct clk_duty *duty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) struct clk_duty *duty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) if (clk_core_rate_is_protected(core))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) trace_clk_set_duty_cycle(core, duty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) if (!core->ops->set_duty_cycle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) return clk_core_set_duty_cycle_parent_nolock(core, duty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) ret = core->ops->set_duty_cycle(core->hw, duty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) memcpy(&core->duty, duty, sizeof(*duty));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) trace_clk_set_duty_cycle_complete(core, duty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) struct clk_duty *duty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) if (core->parent &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) * @clk: clock signal source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) * @num: numerator of the duty cycle ratio to be applied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) * @den: denominator of the duty cycle ratio to be applied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) * Apply the duty cycle ratio if the ratio is valid and the clock can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) * perform this operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) * Returns (0) on success, a negative errno otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) struct clk_duty duty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) /* sanity check the ratio */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) if (den == 0 || num > den)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) duty.num = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) duty.den = den;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) if (clk->exclusive_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) clk_core_rate_unprotect(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) if (clk->exclusive_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) clk_core_rate_protect(clk->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) unsigned int scale)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) struct clk_duty *duty = &core->duty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) ret = clk_core_update_duty_cycle_nolock(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) ret = mult_frac(scale, duty->num, duty->den);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) * @clk: clock signal source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) * @scale: scaling factor to be applied to represent the ratio as an integer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) * Returns the duty cycle ratio of a clock node multiplied by the provided
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) * scaling factor, or negative errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) return clk_core_get_scaled_duty_cycle(clk->core, scale);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) * clk_is_match - check if two clk's point to the same hardware clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) * @p: clk compared against q
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) * @q: clk compared against p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) * Returns true if the two struct clk pointers both point to the same hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) * clock node. Put differently, returns true if struct clk *p and struct clk *q
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) * share the same struct clk_core object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) * Returns false otherwise. Note that two NULL clks are treated as matching.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) bool clk_is_match(const struct clk *p, const struct clk *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) /* trivial case: identical struct clk's or both NULL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) if (p == q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) /* true if clk->core pointers match. Avoid dereferencing garbage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) if (p->core == q->core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) EXPORT_SYMBOL_GPL(clk_is_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) /*** debugfs support ***/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) static struct dentry *rootdir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) static int inited = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) static DEFINE_MUTEX(clk_debug_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) static HLIST_HEAD(clk_debug_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) static struct hlist_head *orphan_list[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) &clk_orphan_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) int phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) level * 3 + 1, "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 30 - level * 3, c->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) c->enable_count, c->prepare_count, c->protect_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) clk_core_get_rate_recalc(c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) clk_core_get_accuracy_recalc(c));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) phase = clk_core_get_phase(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) if (phase >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) seq_printf(s, "%5d", phase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) seq_puts(s, "-----");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) clk_summary_show_one(s, c, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) hlist_for_each_entry(child, &c->children, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) clk_summary_show_subtree(s, child, level + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) static int clk_summary_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) struct clk_core *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) struct hlist_head **lists = (struct hlist_head **)s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) seq_puts(s, " enable prepare protect duty\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) seq_puts(s, " clock count count count rate accuracy phase cycle\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) seq_puts(s, "---------------------------------------------------------------------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) for (; *lists; lists++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) hlist_for_each_entry(c, *lists, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) clk_summary_show_subtree(s, c, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) DEFINE_SHOW_ATTRIBUTE(clk_summary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) int phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) unsigned long min_rate, max_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) clk_core_get_boundaries(c, &min_rate, &max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) /* This should be JSON format, i.e. elements separated with a comma */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) seq_printf(s, "\"%s\": { ", c->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) seq_printf(s, "\"enable_count\": %d,", c->enable_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) seq_printf(s, "\"protect_count\": %d,", c->protect_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) seq_printf(s, "\"min_rate\": %lu,", min_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) seq_printf(s, "\"max_rate\": %lu,", max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) phase = clk_core_get_phase(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) if (phase >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) seq_printf(s, "\"phase\": %d,", phase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) seq_printf(s, "\"duty_cycle\": %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) clk_core_get_scaled_duty_cycle(c, 100000));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) clk_dump_one(s, c, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) hlist_for_each_entry(child, &c->children, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) seq_putc(s, ',');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) clk_dump_subtree(s, child, level + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) seq_putc(s, '}');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) static int clk_dump_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) struct clk_core *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) bool first_node = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) struct hlist_head **lists = (struct hlist_head **)s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) seq_putc(s, '{');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) for (; *lists; lists++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) hlist_for_each_entry(c, *lists, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) if (!first_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) seq_putc(s, ',');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) first_node = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) clk_dump_subtree(s, c, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) seq_puts(s, "}\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) DEFINE_SHOW_ATTRIBUTE(clk_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) #define CLOCK_ALLOW_WRITE_DEBUGFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) * This can be dangerous, therefore don't provide any real compile time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) * configuration option for this feature.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) * People who want to use this will need to modify the source code directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) static int clk_rate_set(void *data, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) struct clk_core *core = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) ret = clk_core_set_rate_nolock(core, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) #define clk_rate_mode 0644
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) static int clk_prepare_enable_set(void *data, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) struct clk_core *core = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) if (val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) ret = clk_prepare_enable(core->hw->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) clk_disable_unprepare(core->hw->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) static int clk_prepare_enable_get(void *data, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) struct clk_core *core = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) *val = core->enable_count && core->prepare_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) clk_prepare_enable_set, "%llu\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) #define clk_rate_set NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) #define clk_rate_mode 0444
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) static int clk_rate_get(void *data, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) struct clk_core *core = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) *val = core->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) } clk_flags[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) #define ENTRY(f) { f, #f }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) ENTRY(CLK_SET_RATE_GATE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) ENTRY(CLK_SET_PARENT_GATE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) ENTRY(CLK_SET_RATE_PARENT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) ENTRY(CLK_IGNORE_UNUSED),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) ENTRY(CLK_GET_RATE_NOCACHE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) ENTRY(CLK_SET_RATE_NO_REPARENT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) ENTRY(CLK_GET_ACCURACY_NOCACHE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) ENTRY(CLK_RECALC_NEW_RATES),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) ENTRY(CLK_SET_RATE_UNGATE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) ENTRY(CLK_IS_CRITICAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) ENTRY(CLK_OPS_PARENT_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) ENTRY(CLK_DUTY_CYCLE_PARENT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) #undef ENTRY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) static int clk_flags_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) struct clk_core *core = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) unsigned long flags = core->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) if (flags & clk_flags[i].flag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) seq_printf(s, "%s\n", clk_flags[i].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) flags &= ~clk_flags[i].flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) if (flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) /* Unknown flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) seq_printf(s, "0x%lx\n", flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) DEFINE_SHOW_ATTRIBUTE(clk_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) static void possible_parent_show(struct seq_file *s, struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) unsigned int i, char terminator)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) struct clk_core *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) * Go through the following options to fetch a parent's name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) * 1. Fetch the registered parent clock and use its name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) * 2. Use the global (fallback) name if specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) * 3. Use the local fw_name if provided
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) * 4. Fetch parent clock's clock-output-name if DT index was set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) * This may still fail in some cases, such as when the parent is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) * specified directly via a struct clk_hw pointer, but it isn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) * registered (yet).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) parent = clk_core_get_parent_by_index(core, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) if (parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) seq_puts(s, parent->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) else if (core->parents[i].name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) seq_puts(s, core->parents[i].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) else if (core->parents[i].fw_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) else if (core->parents[i].index >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) seq_puts(s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) of_clk_get_parent_name(core->of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) core->parents[i].index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) seq_puts(s, "(missing)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) seq_putc(s, terminator);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) static int possible_parents_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) struct clk_core *core = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) for (i = 0; i < core->num_parents - 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) possible_parent_show(s, core, i, ' ');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) possible_parent_show(s, core, i, '\n');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) DEFINE_SHOW_ATTRIBUTE(possible_parents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) static int current_parent_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) struct clk_core *core = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) if (core->parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) seq_printf(s, "%s\n", core->parent->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) DEFINE_SHOW_ATTRIBUTE(current_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) static int clk_duty_cycle_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) struct clk_core *core = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) struct clk_duty *duty = &core->duty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) seq_printf(s, "%u/%u\n", duty->num, duty->den);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) static int clk_min_rate_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) struct clk_core *core = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) unsigned long min_rate, max_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) clk_core_get_boundaries(core, &min_rate, &max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) seq_printf(s, "%lu\n", min_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) DEFINE_SHOW_ATTRIBUTE(clk_min_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) static int clk_max_rate_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) struct clk_core *core = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) unsigned long min_rate, max_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) clk_core_get_boundaries(core, &min_rate, &max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) seq_printf(s, "%lu\n", max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) struct dentry *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) if (!core || !pdentry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) root = debugfs_create_dir(core->name, pdentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) core->dentry = root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) debugfs_create_file("clk_rate", clk_rate_mode, root, core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) &clk_rate_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) debugfs_create_u32("clk_phase", 0444, root, &core->phase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) debugfs_create_file("clk_duty_cycle", 0444, root, core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) &clk_duty_cycle_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) debugfs_create_file("clk_prepare_enable", 0644, root, core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) &clk_prepare_enable_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) if (core->num_parents > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) debugfs_create_file("clk_parent", 0444, root, core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) ¤t_parent_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) if (core->num_parents > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) debugfs_create_file("clk_possible_parents", 0444, root, core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) &possible_parents_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) if (core->ops->debug_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) core->ops->debug_init(core->hw, core->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) * clk_debug_register - add a clk node to the debugfs clk directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) * @core: the clk being added to the debugfs clk directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) * Dynamically adds a clk to the debugfs clk directory if debugfs has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) * initialized. Otherwise it bails out early since the debugfs clk directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) * will be created lazily by clk_debug_init as part of a late_initcall.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) static void clk_debug_register(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) mutex_lock(&clk_debug_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) hlist_add_head(&core->debug_node, &clk_debug_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) if (inited)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) clk_debug_create_one(core, rootdir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) mutex_unlock(&clk_debug_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) * clk_debug_unregister - remove a clk node from the debugfs clk directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) * @core: the clk being removed from the debugfs clk directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) * Dynamically removes a clk and all its child nodes from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) * debugfs clk directory if clk->dentry points to debugfs created by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) * clk_debug_register in __clk_core_init.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) static void clk_debug_unregister(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) mutex_lock(&clk_debug_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) hlist_del_init(&core->debug_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) debugfs_remove_recursive(core->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) core->dentry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) mutex_unlock(&clk_debug_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) * clk_debug_init - lazily populate the debugfs clk directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) * clks are often initialized very early during boot before memory can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) * dynamically allocated and well before debugfs is setup. This function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) * populates the debugfs clk directory once at boot-time when we know that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) * debugfs is setup. It should only be called once at boot-time, all other clks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) * added dynamically will be done so with clk_debug_register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) static int __init clk_debug_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) struct clk_core *core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) pr_warn("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) pr_warn("********************************************************************\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) pr_warn("** **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) pr_warn("** **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) pr_warn("** This means that this kernel is built to expose clk operations **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) pr_warn("** such as parent or rate setting, enabling, disabling, etc. **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) pr_warn("** to userspace, which may compromise security on your system. **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) pr_warn("** **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) pr_warn("** If you see this message and you are not debugging the **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) pr_warn("** kernel, report this immediately to your vendor! **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) pr_warn("** **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) pr_warn("********************************************************************\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) rootdir = debugfs_create_dir("clk", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) &clk_summary_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) &clk_dump_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) &clk_summary_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) &clk_dump_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) mutex_lock(&clk_debug_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) hlist_for_each_entry(core, &clk_debug_list, debug_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) clk_debug_create_one(core, rootdir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) inited = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) mutex_unlock(&clk_debug_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) late_initcall(clk_debug_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) static inline void clk_debug_register(struct clk_core *core) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) static inline void clk_debug_unregister(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) static void clk_core_reparent_orphans_nolock(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) struct clk_core *orphan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) struct hlist_node *tmp2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) * walk the list of orphan clocks and reparent any that newly finds a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) * parent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) struct clk_core *parent = __clk_init_parent(orphan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) * We need to use __clk_set_parent_before() and _after() to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) * to properly migrate any prepare/enable count of the orphan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) * clock. This is important for CLK_IS_CRITICAL clocks, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) * are enabled during init but might not have a parent yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) if (parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) /* update the clk tree topology */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) __clk_set_parent_before(orphan, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) __clk_set_parent_after(orphan, parent, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) __clk_recalc_accuracies(orphan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) __clk_recalc_rates(orphan, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) __clk_core_update_orphan_hold_state(orphan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) * __clk_init_parent() will set the initial req_rate to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) * 0 if the clock doesn't have clk_ops::recalc_rate and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) * is an orphan when it's registered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) * 'req_rate' is used by clk_set_rate_range() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) * clk_put() to trigger a clk_set_rate() call whenever
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) * the boundaries are modified. Let's make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) * 'req_rate' is set to something non-zero so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) * clk_set_rate_range() doesn't drop the frequency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) orphan->req_rate = orphan->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) * __clk_core_init - initialize the data structures in a struct clk_core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) * @core: clk_core being initialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) * Initializes the lists in struct clk_core, queries the hardware for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) * parent and rate and sets them both.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) static int __clk_core_init(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) struct clk_core *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) unsigned long rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) int phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) if (!core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) * Set hw->core after grabbing the prepare_lock to synchronize with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) * callers of clk_core_fill_parent_index() where we treat hw->core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) * being NULL as the clk not being registered yet. This is crucial so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) * that clks aren't parented until their parent is fully registered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) core->hw->core = core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) ret = clk_pm_runtime_get(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) /* check to see if a clock with this name is already registered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) if (clk_core_lookup(core->name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) pr_debug("%s: clk %s already initialized\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) __func__, core->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) ret = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) if (core->ops->set_rate &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) !((core->ops->round_rate || core->ops->determine_rate) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) core->ops->recalc_rate)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) __func__, core->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) if (core->ops->set_parent && !core->ops->get_parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) pr_err("%s: %s must implement .get_parent & .set_parent\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) __func__, core->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) if (core->num_parents > 1 && !core->ops->get_parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) pr_err("%s: %s must implement .get_parent as it has multi parents\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) __func__, core->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) if (core->ops->set_rate_and_parent &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) !(core->ops->set_parent && core->ops->set_rate)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) pr_err("%s: %s must implement .set_parent & .set_rate\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) __func__, core->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) * optional platform-specific magic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) * The .init callback is not used by any of the basic clock types, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) * exists for weird hardware that must perform initialization magic for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) * CCF to get an accurate view of clock for any other callbacks. It may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) * also be used needs to perform dynamic allocations. Such allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) * must be freed in the terminate() callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) * This callback shall not be used to initialize the parameters state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) * such as rate, parent, etc ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) * If it exist, this callback should called before any other callback of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) * the clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) if (core->ops->init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) ret = core->ops->init(core->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) parent = core->parent = __clk_init_parent(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) * Populate core->parent if parent has already been clk_core_init'd. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) * parent has not yet been clk_core_init'd then place clk in the orphan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) * list. If clk doesn't have any parents then place it in the root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) * clk list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) * Every time a new clk is clk_init'd then we walk the list of orphan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) * clocks and re-parent any that are children of the clock currently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) * being clk_init'd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) if (parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) hlist_add_head(&core->child_node, &parent->children);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) core->orphan = parent->orphan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) } else if (!core->num_parents) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) hlist_add_head(&core->child_node, &clk_root_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) core->orphan = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) hlist_add_head(&core->child_node, &clk_orphan_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) core->orphan = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) * Set clk's accuracy. The preferred method is to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) * .recalc_accuracy. For simple clocks and lazy developers the default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) * fallback is to use the parent's accuracy. If a clock doesn't have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) * parent (or is orphaned) then accuracy is set to zero (perfect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) * clock).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) if (core->ops->recalc_accuracy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) core->accuracy = core->ops->recalc_accuracy(core->hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) clk_core_get_accuracy_no_lock(parent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) else if (parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) core->accuracy = parent->accuracy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) core->accuracy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) * Set clk's phase by clk_core_get_phase() caching the phase.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) * Since a phase is by definition relative to its parent, just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) * query the current clock phase, or just assume it's in phase.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) phase = clk_core_get_phase(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) if (phase < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) ret = phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) pr_warn("%s: Failed to get phase for clk '%s'\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) core->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) * Set clk's duty cycle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) clk_core_update_duty_cycle_nolock(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) * Set clk's rate. The preferred method is to use .recalc_rate. For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) * simple clocks and lazy developers the default fallback is to use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) * parent's rate. If a clock doesn't have a parent (or is orphaned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) * then rate is set to zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) if (core->ops->recalc_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) rate = core->ops->recalc_rate(core->hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) clk_core_get_rate_nolock(parent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) else if (parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) rate = parent->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) core->rate = core->req_rate = rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) core->boot_enabled = clk_core_is_enabled(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) * don't get accidentally disabled when walking the orphan tree and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) * reparenting clocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) if (core->flags & CLK_IS_CRITICAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) ret = clk_core_prepare(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) pr_warn("%s: critical clk '%s' failed to prepare\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) __func__, core->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) flags = clk_enable_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) ret = clk_core_enable(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) clk_enable_unlock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) pr_warn("%s: critical clk '%s' failed to enable\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) __func__, core->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) clk_core_unprepare(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) clk_core_hold_state(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) clk_core_reparent_orphans_nolock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) kref_init(&core->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) clk_pm_runtime_put(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) hlist_del_init(&core->child_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) core->hw->core = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) clk_debug_register(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) * @core: clk to add consumer to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) * @clk: consumer to link to a clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) hlist_add_head(&clk->clks_node, &core->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) * @clk: consumer to unlink
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) static void clk_core_unlink_consumer(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) hlist_del(&clk->clks_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) * @core: clk to allocate a consumer for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) * @dev_id: string describing device name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) * @con_id: connection ID string on device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) * Returns: clk consumer left unlinked from the consumer list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) const char *con_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) clk = kzalloc(sizeof(*clk), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) clk->core = core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) clk->dev_id = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) clk->max_rate = ULONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) return clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) * free_clk - Free a clk consumer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) * @clk: clk consumer to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) * Note, this assumes the clk has been unlinked from the clk_core consumer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) * list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) static void free_clk(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) kfree_const(clk->con_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) kfree(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) * a clk_hw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) * @dev: clk consumer device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) * @hw: clk_hw associated with the clk being consumed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) * @dev_id: string describing device name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) * @con_id: connection ID string on device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) * This is the main function used to create a clk pointer for use by clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) * consumers. It connects a consumer to the clk_core and clk_hw structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) * used by the framework and clk provider respectively.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) const char *dev_id, const char *con_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) struct clk_core *core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) /* This is to allow this function to be chained to others */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) if (IS_ERR_OR_NULL(hw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) return ERR_CAST(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) core = hw->core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) clk = alloc_clk(core, dev_id, con_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) if (IS_ERR(clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) return clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) clk->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) if (!try_module_get(core->owner)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) free_clk(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) kref_get(&core->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) clk_core_link_consumer(core, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) return clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) * clk_hw_get_clk - get clk consumer given an clk_hw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) * @hw: clk_hw associated with the clk being consumed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) * @con_id: connection ID string on device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) * Returns: new clk consumer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) * This is the function to be used by providers which need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) * to get a consumer clk and act on the clock element
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) * Calls to this function must be balanced with calls clk_put()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) struct device *dev = hw->core->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) return clk_hw_create_clk(dev, hw, dev_name(dev), con_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) EXPORT_SYMBOL(clk_hw_get_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) const char *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) if (!src) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) if (must_exist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) *dst_p = dst = kstrdup_const(src, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) if (!dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) static int clk_core_populate_parent_map(struct clk_core *core,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) const struct clk_init_data *init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) u8 num_parents = init->num_parents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) const char * const *parent_names = init->parent_names;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) const struct clk_hw **parent_hws = init->parent_hws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) const struct clk_parent_data *parent_data = init->parent_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) struct clk_parent_map *parents, *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) if (!num_parents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) * Avoid unnecessary string look-ups of clk_core's possible parents by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) * having a cache of names/clk_hw pointers to clk_core pointers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) core->parents = parents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) if (!parents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) /* Copy everything over because it might be __initdata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) for (i = 0, parent = parents; i < num_parents; i++, parent++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) parent->index = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) if (parent_names) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) /* throw a WARN if any entries are NULL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) WARN(!parent_names[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) "%s: invalid NULL in %s's .parent_names\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) __func__, core->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) ret = clk_cpy_name(&parent->name, parent_names[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) } else if (parent_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) parent->hw = parent_data[i].hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) parent->index = parent_data[i].index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) ret = clk_cpy_name(&parent->fw_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) parent_data[i].fw_name, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) ret = clk_cpy_name(&parent->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) parent_data[i].name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) } else if (parent_hws) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) parent->hw = parent_hws[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) WARN(1, "Must specify parents if num_parents > 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) kfree_const(parents[i].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) kfree_const(parents[i].fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) } while (--i >= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) kfree(parents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) static void clk_core_free_parent_map(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) int i = core->num_parents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) if (!core->num_parents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) while (--i >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) kfree_const(core->parents[i].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) kfree_const(core->parents[i].fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) kfree(core->parents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) static struct clk *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) struct clk_core *core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) const struct clk_init_data *init = hw->init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) * The init data is not supposed to be used outside of registration path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) * Set it to NULL so that provider drivers can't use it either and so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) * we catch use of hw->init early on in the core.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) hw->init = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) core = kzalloc(sizeof(*core), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) if (!core) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) goto fail_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) core->name = kstrdup_const(init->name, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) if (!core->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) goto fail_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) if (WARN_ON(!init->ops)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) goto fail_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) core->ops = init->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) if (dev && pm_runtime_enabled(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) core->rpm_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) core->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) core->of_node = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) if (dev && dev->driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) core->owner = dev->driver->owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) core->hw = hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) core->flags = init->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) core->num_parents = init->num_parents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) core->min_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) core->max_rate = ULONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) ret = clk_core_populate_parent_map(core, init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) goto fail_parents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) INIT_HLIST_HEAD(&core->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) * Don't call clk_hw_create_clk() here because that would pin the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) * provider module to itself and prevent it from ever being removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) hw->clk = alloc_clk(core, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) if (IS_ERR(hw->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) ret = PTR_ERR(hw->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) goto fail_create_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) clk_core_link_consumer(core, hw->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) ret = __clk_core_init(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) return hw->clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) clk_core_unlink_consumer(hw->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) free_clk(hw->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) hw->clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) fail_create_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) clk_core_free_parent_map(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) fail_parents:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) fail_ops:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) kfree_const(core->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) fail_name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) kfree(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) fail_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) * @dev: Device to get device node of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) * Return: device node pointer of @dev, or the device node pointer of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) * @dev->parent if dev doesn't have a device node, or NULL if neither
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) * @dev or @dev->parent have a device node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) static struct device_node *dev_or_parent_of_node(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) np = dev_of_node(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) if (!np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) np = dev_of_node(dev->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) return np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) * clk_register - allocate a new clock, register it and return an opaque cookie
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) * @dev: device that is registering this clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) * @hw: link to hardware-specific clock data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) * clk_register is the *deprecated* interface for populating the clock tree with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) * new clock nodes. Use clk_hw_register() instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) * Returns: a pointer to the newly allocated struct clk which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) * cannot be dereferenced by driver code but may be used in conjunction with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) * rest of the clock API. In the event of an error clk_register will return an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) * error code; drivers must test for an error code after calling clk_register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) struct clk *clk_register(struct device *dev, struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) return __clk_register(dev, dev_or_parent_of_node(dev), hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) EXPORT_SYMBOL_GPL(clk_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) * clk_hw_register - register a clk_hw and return an error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) * @dev: device that is registering this clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) * @hw: link to hardware-specific clock data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) * clk_hw_register is the primary interface for populating the clock tree with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) * new clock nodes. It returns an integer equal to zero indicating success or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) * less than zero indicating failure. Drivers must test for an error code after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) * calling clk_hw_register().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) int clk_hw_register(struct device *dev, struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) hw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) EXPORT_SYMBOL_GPL(clk_hw_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) * of_clk_hw_register - register a clk_hw and return an error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) * @node: device_node of device that is registering this clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) * @hw: link to hardware-specific clock data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) * of_clk_hw_register() is the primary interface for populating the clock tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) * with new clock nodes when a struct device is not available, but a struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) * device_node is. It returns an integer equal to zero indicating success or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) * less than zero indicating failure. Drivers must test for an error code after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) * calling of_clk_hw_register().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) EXPORT_SYMBOL_GPL(of_clk_hw_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) /* Free memory allocated for a clock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) static void __clk_release(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) struct clk_core *core = container_of(ref, struct clk_core, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) clk_core_free_parent_map(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) kfree_const(core->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) kfree(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) * Empty clk_ops for unregistered clocks. These are used temporarily
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) * after clk_unregister() was called on a clock and until last clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) * consumer calls clk_put() and the struct clk object is freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) static int clk_nodrv_prepare_enable(struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086)
static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	/*
	 * void op, so there is no error to return: disabling a clock whose
	 * driver was unregistered is a consumer bug — warn once instead.
	 */
	WARN_ON_ONCE(1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091)
static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/* Rate changes are impossible once the provider is unregistered. */
	return -ENXIO;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097)
static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	/* Reparenting is impossible once the provider is unregistered. */
	return -ENXIO;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102)
/*
 * Ops table installed by clk_unregister(): prepare/enable/set_rate/set_parent
 * all fail with -ENXIO, disable/unprepare only warn, so stale consumers can
 * no longer reach the departed driver's callbacks.
 */
static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) struct clk_core *target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) for (i = 0; i < root->num_parents; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) if (root->parents[i].core == target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) root->parents[i].core = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) hlist_for_each_entry(child, &root->children, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) clk_core_evict_parent_cache_subtree(child, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) /* Remove this clk from all parent caches */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) static void clk_core_evict_parent_cache(struct clk_core *core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) struct hlist_head **lists;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) struct clk_core *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) lockdep_assert_held(&prepare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) for (lists = all_lists; *lists; lists++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) hlist_for_each_entry(root, *lists, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) clk_core_evict_parent_cache_subtree(root, core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139)
/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;
	const struct clk_ops *ops;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	/* Remove debug/debugfs state before taking the prepare lock. */
	clk_debug_unregister(clk->core);

	clk_prepare_lock();

	ops = clk->core->ops;
	/* Detect a double clk_unregister(): nodrv ops already installed. */
	if (ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__,
		       clk->core->name);
		goto unlock;
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->core->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	/* Give the provider a last chance to clean up via its hook. */
	if (ops->terminate)
		ops->terminate(clk->core->hw);

	if (!hlist_empty(&clk->core->children)) {
		struct clk_core *child;
		struct hlist_node *t;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry_safe(child, t, &clk->core->children,
					  child_node)
			clk_core_set_parent_nolock(child, NULL);
	}

	/* No other clock may keep a cached parent pointer to this core. */
	clk_core_evict_parent_cache(clk->core);

	hlist_del_init(&clk->core->child_node);

	if (clk->core->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
					__func__, clk->core->name);

	if (clk->core->protect_count)
		pr_warn("%s: unregistering protected clock: %s\n",
					__func__, clk->core->name);

	/* Drop the registration reference; the last put frees the core. */
	kref_put(&clk->core->ref, __clk_release);
	free_clk(clk);
unlock:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201)
/**
 * clk_hw_unregister - unregister a currently registered clk_hw
 * @hw: hardware-specific clock data to unregister
 */
void clk_hw_unregister(struct clk_hw *hw)
{
	/* The consumer handle stored in @hw carries the registration. */
	clk_unregister(hw->clk);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211)
/* devres release callback: @res is the slot holding the registered clk. */
static void devm_clk_unregister_cb(struct device *dev, void *res)
{
	clk_unregister(*(struct clk **)res);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216)
/* devres release callback: @res is the slot holding the registered clk_hw. */
static void devm_clk_hw_unregister_cb(struct device *dev, void *res)
{
	clk_hw_unregister(*(struct clk_hw **)res);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) * devm_clk_register - resource managed clk_register()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) * @dev: device that is registering this clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) * @hw: link to hardware-specific clock data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) * Clocks returned from this function are automatically clk_unregister()ed on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) * driver detach. See clk_register() for more information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) struct clk **clkp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) clkp = devres_alloc(devm_clk_unregister_cb, sizeof(*clkp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) if (!clkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) clk = clk_register(dev, hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) if (!IS_ERR(clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) *clkp = clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) devres_add(dev, clkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) devres_free(clkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) return clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) EXPORT_SYMBOL_GPL(devm_clk_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) * devm_clk_hw_register - resource managed clk_hw_register()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) * @dev: device that is registering this clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) * @hw: link to hardware-specific clock data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) * Managed clk_hw_register(). Clocks registered by this function are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) * for more information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) struct clk_hw **hwp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) hwp = devres_alloc(devm_clk_hw_unregister_cb, sizeof(*hwp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) if (!hwp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) ret = clk_hw_register(dev, hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) *hwp = hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) devres_add(dev, hwp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) devres_free(hwp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) EXPORT_SYMBOL_GPL(devm_clk_hw_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282)
/*
 * devres_release() match callback for devm_clk_unregister().
 *
 * @res is the devres data area, i.e. the struct clk * slot that
 * devm_clk_register() filled (*clkp = clk), so it must be dereferenced
 * before comparing against @data. The previous code compared the slot
 * address itself to @data, which could never match and made
 * devm_clk_unregister() always fail with -ENOENT.
 */
static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk **c = res;

	if (WARN_ON(!c))
		return 0;

	return *c == data;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290)
/*
 * devres_release() match callback for devm_clk_hw_unregister().
 *
 * @res is the devres data area, i.e. the struct clk_hw * slot that
 * devm_clk_hw_register() filled (*hwp = hw), so it must be dereferenced
 * before comparing against @data. The previous code compared the slot
 * address itself to @data, which could never match and made
 * devm_clk_hw_unregister() always fail with -ENOENT.
 */
static int devm_clk_hw_match(struct device *dev, void *res, void *data)
{
	struct clk_hw **hw = res;

	if (WARN_ON(!hw))
		return 0;

	return *hw == data;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299)
/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that is unregistering the clock data
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	/* WARN if no matching devres was found (devres_release() failed). */
	WARN_ON(devres_release(dev, devm_clk_unregister_cb, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314)
/**
 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
 * @dev: device that is unregistering the hardware-specific clock data
 * @hw: link to hardware-specific clock data
 *
 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
{
	/* WARN if no matching devres was found (devres_release() failed). */
	WARN_ON(devres_release(dev, devm_clk_hw_unregister_cb, devm_clk_hw_match,
			       hw));
}
EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330)
/* devres release callback: drop the reference taken by devm_clk_hw_get_clk(). */
static void devm_clk_release(struct device *dev, void *res)
{
	clk_put(*(struct clk **)res);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) * devm_clk_hw_get_clk - resource managed clk_hw_get_clk()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) * @dev: device that is registering this clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) * @hw: clk_hw associated with the clk being consumed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) * @con_id: connection ID string on device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) * Managed clk_hw_get_clk(). Clocks got with this function are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) * automatically clk_put() on driver detach. See clk_put()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) * for more information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) const char *con_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) struct clk **clkp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) /* This should not happen because it would mean we have drivers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) * passing around clk_hw pointers instead of having the caller use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) * proper clk_get() style APIs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) WARN_ON_ONCE(dev != hw->core->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) if (!clkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) clk = clk_hw_get_clk(hw, con_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) if (!IS_ERR(clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) *clkp = clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) devres_add(dev, clkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) devres_free(clkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) return clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) EXPORT_SYMBOL_GPL(devm_clk_hw_get_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373)
/*
 * clkdev helpers
 */

/* Release a consumer handle obtained via clk_get()/clk_hw_get_clk(). */
void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	/*
	 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
	 * given user should be balanced with calls to clk_rate_exclusive_put()
	 * and by that same consumer
	 */
	if (WARN_ON(clk->exclusive_count)) {
		/* We voiced our concern, let's sanitize the situation */
		clk->core->protect_count -= (clk->exclusive_count - 1);
		clk_core_rate_unprotect(clk->core);
		clk->exclusive_count = 0;
	}

	/* Detach this consumer from the core's list of struct clk users. */
	hlist_del(&clk->clks_node);
	/*
	 * This consumer's rate limits die with it: re-evaluate the rate if
	 * its min/max were what kept the core away from req_rate.
	 */
	if (clk->min_rate > clk->core->req_rate ||
	    clk->max_rate < clk->core->req_rate)
		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);

	/* Cache the owner first: the core may be freed by this kref_put(). */
	owner = clk->core->owner;
	kref_put(&clk->core->ref, __clk_release);

	clk_prepare_unlock();

	module_put(owner);

	free_clk(clk);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) /*** clk rate change notifiers ***/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415)
/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes. This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon. The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * this will cause a nested prepare_lock mutex.
 *
 * In all notification cases (pre, post and abort rate change) the original
 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
 * and the new frequency is passed via struct clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			goto found;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	cn = kzalloc(sizeof(*cn), GFP_KERNEL);
	if (!cn)
		goto out;

	cn->clk = clk;
	srcu_init_notifier_head(&cn->notifier_head);

	list_add(&cn->node, &clk_notifier_list);

found:
	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	/*
	 * NOTE(review): the count is bumped regardless of @ret; this relies
	 * on srcu_notifier_chain_register() never failing — verify if that
	 * assumption ever changes.
	 */
	clk->core->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472)
/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOENT;	/* returned when @clk has no notifier entry */

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk == clk) {
			ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

			clk->core->notifier_count--;

			/* XXX the notifier code should handle this better */
			if (!cn->notifier_head.head) {
				/* Last subscriber gone: free the per-clk entry. */
				srcu_cleanup_notifier_head(&cn->notifier_head);
				list_del(&cn->node);
				kfree(cn);
			}
			break;
		}
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515)
/* devres payload tying a (clk, notifier_block) pair to a device lifetime. */
struct clk_notifier_devres {
	struct clk *clk;		/* clock being watched */
	struct notifier_block *nb;	/* callback to drop on detach */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520)
/* devres release callback: undo devm_clk_notifier_register() on detach. */
static void devm_clk_notifier_release(struct device *dev, void *res)
{
	struct clk_notifier_devres *devres = res;

	clk_notifier_unregister(devres->clk, devres->nb);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) int devm_clk_notifier_register(struct device *dev, struct clk *clk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) struct notifier_block *nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) struct clk_notifier_devres *devres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) devres = devres_alloc(devm_clk_notifier_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) sizeof(*devres), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) if (!devres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) ret = clk_notifier_register(clk, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) devres->clk = clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) devres->nb = nb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) devres_free(devres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) EXPORT_SYMBOL_GPL(devm_clk_notifier_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) #ifdef CONFIG_OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) static void clk_core_reparent_orphans(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) clk_core_reparent_orphans_nolock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) * struct of_clk_provider - Clock provider registration structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) * @link: Entry in global list of clock providers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) * @node: Pointer to device tree node of clock provider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) * @get: Get clock callback. Returns NULL or a struct clk for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) * given clock specifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) * @get_hw: Get clk_hw callback. Returns NULL, ERR_PTR or a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) * struct clk_hw for the given clock specifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) * @data: context pointer to be passed into @get callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) struct of_clk_provider {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) struct list_head link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) struct device_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) extern struct of_device_id __clk_of_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) static const struct of_device_id __clk_of_table_sentinel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) __used __section("__clk_of_table_end");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) static LIST_HEAD(of_clk_providers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) static DEFINE_MUTEX(of_clk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) return data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) return data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) struct clk_onecell_data *clk_data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) unsigned int idx = clkspec->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) if (idx >= clk_data->clk_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) pr_err("%s: invalid clock index %u\n", __func__, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) return clk_data->clks[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) struct clk_hw *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) struct clk_hw_onecell_data *hw_data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) unsigned int idx = clkspec->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) if (idx >= hw_data->num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) pr_err("%s: invalid index %u\n", __func__, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) return hw_data->hws[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) * of_clk_add_provider() - Register a clock provider for a node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) * @np: Device node pointer associated with clock provider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) * @clk_src_get: callback for decoding clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) * @data: context pointer for @clk_src_get callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) int of_clk_add_provider(struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) void *data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) struct of_clk_provider *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) if (!np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) cp = kzalloc(sizeof(*cp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) if (!cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) cp->node = of_node_get(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) cp->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) cp->get = clk_src_get;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) mutex_lock(&of_clk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) list_add(&cp->link, &of_clk_providers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) mutex_unlock(&of_clk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) pr_debug("Added clock from %pOF\n", np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) clk_core_reparent_orphans();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) ret = of_clk_set_defaults(np, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) of_clk_del_provider(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) fwnode_dev_initialized(&np->fwnode, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) EXPORT_SYMBOL_GPL(of_clk_add_provider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) * of_clk_add_hw_provider() - Register a clock provider for a node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) * @np: Device node pointer associated with clock provider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) * @get: callback for decoding clk_hw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) * @data: context pointer for @get callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) int of_clk_add_hw_provider(struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) struct clk_hw *(*get)(struct of_phandle_args *clkspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) void *data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) struct of_clk_provider *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) if (!np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) cp = kzalloc(sizeof(*cp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) if (!cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) cp->node = of_node_get(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) cp->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) cp->get_hw = get;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) mutex_lock(&of_clk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) list_add(&cp->link, &of_clk_providers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) mutex_unlock(&of_clk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) pr_debug("Added clk_hw provider from %pOF\n", np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) clk_core_reparent_orphans();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) ret = of_clk_set_defaults(np, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) of_clk_del_provider(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) static void devm_of_clk_release_provider(struct device *dev, void *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) of_clk_del_provider(*(struct device_node **)res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) * We allow a child device to use its parent device as the clock provider node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) * for cases like MFD sub-devices where the child device driver wants to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) * devm_*() APIs but not list the device in DT as a sub-node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) static struct device_node *get_clk_provider_node(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) struct device_node *np, *parent_np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) parent_np = dev->parent ? dev->parent->of_node : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) if (!of_find_property(np, "#clock-cells", NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) if (of_find_property(parent_np, "#clock-cells", NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) np = parent_np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) return np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) * devm_of_clk_add_hw_provider() - Managed clk provider node registration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) * @dev: Device acting as the clock provider (used for DT node and lifetime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) * @get: callback for decoding clk_hw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) * @data: context pointer for @get callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) * Registers clock provider for given device's node. If the device has no DT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) * node or if the device node lacks of clock provider information (#clock-cells)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) * then the parent device's node is scanned for this information. If parent node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) * has the #clock-cells then it is used in registration. Provider is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) * automatically released at device exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) * Return: 0 on success or an errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) int devm_of_clk_add_hw_provider(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) struct clk_hw *(*get)(struct of_phandle_args *clkspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) void *data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) struct device_node **ptr, *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) if (!ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) np = get_clk_provider_node(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) ret = of_clk_add_hw_provider(np, get, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) *ptr = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) devres_add(dev, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) devres_free(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) * of_clk_del_provider() - Remove a previously registered clock provider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) * @np: Device node pointer associated with clock provider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) void of_clk_del_provider(struct device_node *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) struct of_clk_provider *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) if (!np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) mutex_lock(&of_clk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) list_for_each_entry(cp, &of_clk_providers, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) if (cp->node == np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) list_del(&cp->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) fwnode_dev_initialized(&np->fwnode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) of_node_put(cp->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) kfree(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) mutex_unlock(&of_clk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) EXPORT_SYMBOL_GPL(of_clk_del_provider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) static int devm_clk_provider_match(struct device *dev, void *res, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) struct device_node **np = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) if (WARN_ON(!np || !*np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) return *np == data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) * devm_of_clk_del_provider() - Remove clock provider registered using devm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) * @dev: Device to whose lifetime the clock provider was bound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) void devm_of_clk_del_provider(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) struct device_node *np = get_clk_provider_node(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) ret = devres_release(dev, devm_of_clk_release_provider,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) devm_clk_provider_match, np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) EXPORT_SYMBOL(devm_of_clk_del_provider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) * of_parse_clkspec() - Parse a DT clock specifier for a given device node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) * @np: device node to parse clock specifier from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) * @index: index of phandle to parse clock out of. If index < 0, @name is used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) * @name: clock name to find and parse. If name is NULL, the index is used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) * @out_args: Result of parsing the clock specifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) * Parses a device node's "clocks" and "clock-names" properties to find the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) * phandle and cells for the index or name that is desired. The resulting clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) * specifier is placed into @out_args, or an errno is returned when there's a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) * parsing error. The @index argument is ignored if @name is non-NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) * Example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) * phandle1: clock-controller@1 {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) * #clock-cells = <2>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) * }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) * phandle2: clock-controller@2 {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) * #clock-cells = <1>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) * }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) * clock-consumer@3 {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) * clocks = <&phandle1 1 2 &phandle2 3>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) * clock-names = "name1", "name2";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) * }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) * To get a device_node for `clock-controller@2' node you may call this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) * function a few different ways:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) * of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) * of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) * of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) * the "clock-names" property of @np.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) static int of_parse_clkspec(const struct device_node *np, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) const char *name, struct of_phandle_args *out_args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) int ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) /* Walk up the tree of devices looking for a clock property that matches */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) while (np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) * For named clocks, first look up the name in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) * "clock-names" property. If it cannot be found, then index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) * will be an error code and of_parse_phandle_with_args() will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) * return -EINVAL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) if (name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) index = of_property_match_string(np, "clock-names", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) index, out_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) if (name && index >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) * No matching clock found on this node. If the parent node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) * has a "clock-ranges" property, then we can try one of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) * clocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) np = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) if (np && !of_get_property(np, "clock-ranges", NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) static struct clk_hw *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) struct of_phandle_args *clkspec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) if (provider->get_hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) return provider->get_hw(clkspec, provider->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) clk = provider->get(clkspec, provider->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) if (IS_ERR(clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) return ERR_CAST(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) return __clk_get_hw(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) static struct clk_hw *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) struct of_clk_provider *provider;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) if (!clkspec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) mutex_lock(&of_clk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) list_for_each_entry(provider, &of_clk_providers, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) if (provider->node == clkspec->np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) hw = __of_clk_get_hw_from_provider(provider, clkspec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) if (!IS_ERR(hw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) mutex_unlock(&of_clk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) return hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) * of_clk_get_from_provider() - Lookup a clock from a clock provider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) * @clkspec: pointer to a clock specifier data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) * This function looks up a struct clk from the registered list of clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) * providers, an input is a clock specifier data structure as returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) * from the of_parse_phandle_with_args() function call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) return clk_hw_create_clk(NULL, hw, NULL, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) const char *con_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) struct clk_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) struct of_phandle_args clkspec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) ret = of_parse_clkspec(np, index, con_id, &clkspec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) hw = of_clk_get_hw_from_clkspec(&clkspec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) of_node_put(clkspec.np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) return hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) static struct clk *__of_clk_get(struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) int index, const char *dev_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) const char *con_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) struct clk_hw *hw = of_clk_get_hw(np, index, con_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) return clk_hw_create_clk(NULL, hw, dev_id, con_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) struct clk *of_clk_get(struct device_node *np, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) return __of_clk_get(np, index, np->full_name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) EXPORT_SYMBOL(of_clk_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) * @np: pointer to clock consumer node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) * @name: name of consumer's clock input, or NULL for the first clock reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) * This function parses the clocks and clock-names properties,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) * and uses them to look up the struct clk from the registered list of clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) * providers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) if (!np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) return __of_clk_get(np, 0, np->full_name, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) EXPORT_SYMBOL(of_clk_get_by_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) * of_clk_get_parent_count() - Count the number of clocks a device node has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) * @np: device node to count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) * Returns: The number of clocks that are possible parents of this node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) */
unsigned int of_clk_get_parent_count(const struct device_node *np)
{
	/* of_count_phandle_with_args() returns a negative errno on failure. */
	int n = of_count_phandle_with_args(np, "clocks", "#clock-cells");

	return n < 0 ? 0 : n;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) const char *of_clk_get_parent_name(const struct device_node *np, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) struct of_phandle_args clkspec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) struct property *prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) const char *clk_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) const __be32 *vp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) u32 pv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) &clkspec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) index = clkspec.args_count ? clkspec.args[0] : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) /* if there is an indices property, use it to transfer the index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) * specified into an array offset for the clock-output-names property.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) if (index == pv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) index = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) /* We went off the end of 'clock-indices' without finding it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) if (prop && !vp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) if (of_property_read_string_index(clkspec.np, "clock-output-names",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) &clk_name) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) * Best effort to get the name if the clock has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) * registered with the framework. If the clock isn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) * registered, we return the node name as the name of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) * the clock as long as #clock-cells = 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) clk = of_clk_get_from_provider(&clkspec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) if (IS_ERR(clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) if (clkspec.args_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) clk_name = clkspec.np->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) clk_name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) clk_name = __clk_get_name(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) clk_put(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) of_node_put(clkspec.np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) return clk_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) * number of parents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) * @np: Device node pointer associated with clock provider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) * @parents: pointer to char array that hold the parents' names
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) * @size: size of the @parents array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) * Return: number of parents for the clock node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) */
int of_clk_parent_fill(struct device_node *np, const char **parents,
		       unsigned int size)
{
	unsigned int n;

	/* Fill until the array is full or a parent name can't be resolved. */
	for (n = 0; n < size; n++) {
		parents[n] = of_clk_get_parent_name(np, n);
		if (!parents[n])
			break;
	}

	return n;
}
EXPORT_SYMBOL_GPL(of_clk_parent_fill);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102)
/*
 * One deferred clock-provider registration, queued on clk_provider_list by
 * of_clk_init() until the provider's parent clocks are ready.
 */
struct clock_provider {
	void (*clk_init_cb)(struct device_node *);	/* provider init callback */
	struct device_node *np;		/* provider DT node (holds a reference) */
	struct list_head node;		/* entry in the pending-provider list */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) * This function looks for a parent clock. If there is one, then it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) * checks that the provider for this parent clock was initialized, in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) * this case the parent clock will be ready.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) static int parent_ready(struct device_node *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) struct clk *clk = of_clk_get(np, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) /* this parent is ready we can check the next one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) if (!IS_ERR(clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) clk_put(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) /* at least one parent is not ready, we exit now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) if (PTR_ERR(clk) == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) * Here we make assumption that the device tree is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) * written correctly. So an error means that there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) * no more parent. As we didn't exit yet, then the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) * previous parent are ready. If there is no clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) * parent, no need to wait for them, then we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) * consider their absence as being ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) * @np: Device node pointer associated with clock provider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) * @index: clock index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) * @flags: pointer to top-level framework flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) * Detects if the clock-critical property exists and, if so, sets the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) * corresponding CLK_IS_CRITICAL flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) * Do not use this function. It exists only for legacy Device Tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) * bindings, such as the one-clock-per-node style that are outdated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) * Those bindings typically put all clock data into .dts and the Linux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) * driver has no clock data, thus making it impossible to set this flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) * correctly from the driver. Only those drivers may call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) * of_clk_detect_critical from their setup functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) * Return: error code or zero on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) */
int of_clk_detect_critical(struct device_node *np, int index,
			   unsigned long *flags)
{
	struct property *prop;
	const __be32 *cur;
	uint32_t idx;

	if (!np || !flags)
		return -EINVAL;

	/* Set CLK_IS_CRITICAL if @index is listed in "clock-critical". */
	of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
		if (index == idx)
			*flags |= CLK_IS_CRITICAL;

	/* Absence of the property is not an error: flags stay untouched. */
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) * of_clk_init() - Scan and init clock providers from the DT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) * @matches: array of compatible values and init functions for providers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) * This function scans the device tree for matching clock providers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) * and calls their initialization functions. It also does it by trying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) * to follow the dependencies.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;
	LIST_HEAD(clk_provider_list);

	/* Default to the providers declared via CLK_OF_DECLARE(). */
	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clocks providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent;

		if (!of_device_is_available(np))
			continue;

		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
		if (!parent) {
			/* OOM: drop everything queued so far and bail out. */
			list_for_each_entry_safe(clk_provider, next,
						 &clk_provider_list, node) {
				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
			}
			/* Also drop the iterator's reference on @np. */
			of_node_put(np);
			return;
		}

		parent->clk_init_cb = match->data;
		parent->np = of_node_get(np);
		list_add_tail(&parent->node, &clk_provider_list);
	}

	/*
	 * Repeatedly sweep the list, initializing each provider whose
	 * parent clocks are ready; each pass may unblock further ones.
	 */
	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					&clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {

				/* Don't populate platform devices */
				of_node_set_flag(clk_provider->np,
						 OF_POPULATED);

				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * We didn't manage to initialize any of the
		 * remaining providers during the last loop, so now we
		 * initialize all the remaining ones unconditionally
		 * in case the clock parent was not mandatory
		 */
		if (!is_init_done)
			force = true;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) #ifdef CONFIG_COMMON_CLK_PROCFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) #include <linux/proc_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) #include <linux/seq_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258)
/* Print usage help when /proc/clk/rate is read. */
static int clk_rate_show(struct seq_file *s, void *v)
{
	seq_puts(s, "set clk rate:\n");
	seq_puts(s, "	echo [clk_name] [rate(Hz)] > /proc/clk/rate\n");

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266)
/* Hook the help text up to reads of /proc/clk/rate. */
static int clk_rate_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_rate_show, NULL);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) static ssize_t clk_rate_write(struct file *filp, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) char clk_name[40], input[55];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) struct clk_core *core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) int argc, ret, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) if (cnt >= sizeof(input))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) if (copy_from_user(input, buf, cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) input[cnt] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) argc = sscanf(input, "%38s %10d", clk_name, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) if (argc != 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) core = clk_core_lookup(clk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) if (IS_ERR_OR_NULL(core)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) pr_err("get %s error\n", clk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) ret = clk_core_set_rate_nolock(core, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) pr_err("set %s rate %d error\n", clk_name, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307)
/* /proc/clk/rate: help text on read, rate setting on write. */
static const struct proc_ops clk_rate_proc_ops = {
	.proc_open	= clk_rate_open,
	.proc_read	= seq_read,
	.proc_write	= clk_rate_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315)
/* Print usage help when /proc/clk/enable is read. */
static int clk_enable_show(struct seq_file *s, void *v)
{
	seq_puts(s, "enable clk:\n");
	seq_puts(s, "	echo enable [clk_name] > /proc/clk/enable\n");
	seq_puts(s, "disable clk:\n");
	seq_puts(s, "	echo disable [clk_name] > /proc/clk/enable\n");

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325)
/* Hook the help text up to reads of /proc/clk/enable. */
static int clk_enable_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_enable_show, NULL);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) static ssize_t clk_enable_write(struct file *filp, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) char cmd[10], clk_name[40], input[50];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) struct clk_core *core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) int argc, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) if (cnt >= sizeof(input))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) if (copy_from_user(input, buf, cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) input[cnt] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) argc = sscanf(input, "%8s %38s", cmd, clk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) if (argc != 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) core = clk_core_lookup(clk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) if (IS_ERR_OR_NULL(core)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) pr_err("get %s error\n", clk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) if (!strncmp(cmd, "enable", strlen("enable"))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) ret = clk_core_prepare_enable(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) pr_err("enable %s err\n", clk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) } else if (!strncmp(cmd, "disable", strlen("disable"))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) clk_core_disable_unprepare(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) pr_err("unsupported cmd(%s)\n", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368)
/* /proc/clk/enable: help text on read, enable/disable commands on write. */
static const struct proc_ops clk_enable_proc_ops = {
	.proc_open	= clk_enable_open,
	.proc_read	= seq_read,
	.proc_write	= clk_enable_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376)
/* Print usage help when /proc/clk/parent is read. */
static int clk_parent_show(struct seq_file *s, void *v)
{
	seq_puts(s, "echo [clk_name] [parent_name] > /proc/clk/parent\n");

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383)
/* Hook the help text up to reads of /proc/clk/parent. */
static int clk_parent_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_parent_show, NULL);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) static ssize_t clk_parent_write(struct file *filp, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) char clk_name[40], p_name[40];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) char input[80];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) struct clk_core *core, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) int argc, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) if (cnt >= sizeof(input))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) if (copy_from_user(input, buf, cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) input[cnt] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) argc = sscanf(input, "%38s %38s", clk_name, p_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) if (argc != 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) core = clk_core_lookup(clk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) if (IS_ERR_OR_NULL(core)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) pr_err("get %s error\n", clk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) p = clk_core_lookup(p_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) if (IS_ERR_OR_NULL(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) pr_err("get %s error\n", p_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) ret = clk_core_set_parent_nolock(core, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) pr_err("set clk(%s)'s parent(%s) error\n", clk_name, p_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427)
/* /proc/clk/parent: help text on read, reparent command on write. */
static const struct proc_ops clk_parent_proc_ops = {
	.proc_open	= clk_parent_open,
	.proc_read	= seq_read,
	.proc_write	= clk_parent_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435)
/*
 * Print one clock as a summary row: name indented by 3 columns per tree
 * level, then enable/prepare/protect counts, rate, accuracy, phase and
 * duty cycle scaled to parts per 100000.
 *
 * NOTE(review): the *_recalc() helpers suggest the caller is expected to
 * hold the prepare lock — confirm against clk_proc_summary_show().
 */
static void clk_proc_summary_show_one(struct seq_file *s, struct clk_core *c,
				      int level)
{
	if (!c)
		return;

	seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
		   c->enable_count, c->prepare_count, c->protect_count,
		   clk_core_get_rate_recalc(c),
		   clk_core_get_accuracy_recalc(c),
		   clk_core_get_phase(c),
		   clk_core_get_scaled_duty_cycle(c, 100000));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) static void clk_proc_summary_show_subtree(struct seq_file *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) struct clk_core *c, int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) struct clk_core *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) if (!c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) clk_proc_summary_show_one(s, c, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) hlist_for_each_entry(child, &c->children, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) clk_proc_summary_show_subtree(s, child, level + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) static int clk_proc_summary_show(struct seq_file *s, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) struct clk_core *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) struct hlist_head *all_lists[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) &clk_root_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) &clk_orphan_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) struct hlist_head **lists = all_lists;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) seq_puts(s, " enable prepare protect duty\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) seq_puts(s, " clock count count count rate accuracy phase cycle\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) seq_puts(s, "---------------------------------------------------------------------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) clk_prepare_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) for (; *lists; lists++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) hlist_for_each_entry(c, *lists, child_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) clk_proc_summary_show_subtree(s, c, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) clk_prepare_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) static int __init clk_create_procfs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) struct proc_dir_entry *proc_clk_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) struct proc_dir_entry *ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) proc_clk_root = proc_mkdir("clk", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) if (!proc_clk_root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) ent = proc_create("rate", 0644, proc_clk_root, &clk_rate_proc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) if (!ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) ent = proc_create("enable", 0644, proc_clk_root, &clk_enable_proc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) if (!ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) ent = proc_create("parent", 0644, proc_clk_root, &clk_parent_proc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) if (!ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) ent = proc_create_single("summary", 0444, proc_clk_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) clk_proc_summary_show);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) if (!ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) proc_remove(proc_clk_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) late_initcall_sync(clk_create_procfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) #endif