^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright 2016 Maxime Ripard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Maxime Ripard <maxime.ripard@free-electrons.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/clk-provider.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/iopoll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include "ccu_common.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include "ccu_gate.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include "ccu_reset.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) static DEFINE_SPINLOCK(ccu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) void __iomem *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) if (!lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) if (common->features & CCU_FEATURE_LOCK_REG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) addr = common->base + common->lock_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) addr = common->base + common->reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * This clock notifier is called when the frequency of a PLL clock is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * changed. In common PLL designs, changes to the dividers take effect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * almost immediately, while changes to the multipliers (implemented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * as dividers in the feedback loop) take a few cycles to work into
 * the feedback loop for the PLL to stabilize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * Sometimes when the PLL clock rate is changed, the decrease in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * divider is too much for the decrease in the multiplier to catch up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * The PLL clock rate will spike, and in some cases, might lock up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * completely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * This notifier callback will gate and then ungate the clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * effectively resetting it, so it proceeds to work. Care must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) * taken to reparent consumers to other temporary clocks during the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * rate change, and that this notifier callback must be the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) * to be registered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) static int ccu_pll_notifier_cb(struct notifier_block *nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) unsigned long event, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) struct ccu_pll_nb *pll = to_ccu_pll_nb(nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) if (event != POST_RATE_CHANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) ccu_gate_helper_disable(pll->common, pll->enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) ret = ccu_gate_helper_enable(pll->common, pll->enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) ccu_helper_wait_for_lock(pll->common, pll->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) return notifier_from_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) pll_nb->clk_nb.notifier_call = ccu_pll_notifier_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) return clk_notifier_register(pll_nb->common->hw.clk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) &pll_nb->clk_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) const struct sunxi_ccu_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) struct ccu_reset *reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) for (i = 0; i < desc->num_ccu_clks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) struct ccu_common *cclk = desc->ccu_clks[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) if (!cclk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) cclk->base = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) cclk->lock = &ccu_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) for (i = 0; i < desc->hw_clks->num ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) struct clk_hw *hw = desc->hw_clks->hws[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) if (!hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) name = hw->init->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) ret = of_clk_hw_register(node, hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) pr_err("Couldn't register clock %d - %s\n", i, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) goto err_clk_unreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) desc->hw_clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) goto err_clk_unreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) reset = kzalloc(sizeof(*reset), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) if (!reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) goto err_alloc_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) reset->rcdev.of_node = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) reset->rcdev.ops = &ccu_reset_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) reset->rcdev.owner = THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) reset->rcdev.nr_resets = desc->num_resets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) reset->base = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) reset->lock = &ccu_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) reset->reset_map = desc->resets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) ret = reset_controller_register(&reset->rcdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) goto err_of_clk_unreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) err_of_clk_unreg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) kfree(reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) err_alloc_reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) of_clk_del_provider(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) err_clk_unreg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) while (--i >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) struct clk_hw *hw = desc->hw_clks->hws[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) if (!hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) clk_hw_unregister(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) }