^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2014 Samsung Electronics Co., Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Author: Thomas Abraham <thomas.ab@samsung.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (c) 2015 Samsung Electronics Co., Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * This file contains the utility function to register CPU clock for Samsung
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Exynos platforms. A CPU clock is defined as a clock supplied to a CPU or a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * group of CPUs. The CPU clock is typically derived from a hierarchy of clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * blocks which includes mux and divider blocks. There are a number of other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * auxiliary clocks supplied to the CPU domain such as the debug blocks and AXI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * clock for CPU domain. The rates of these auxiliary clocks are related to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * CPU clock rate and this relation is usually specified in the hardware manual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * of the SoC or supplied after the SoC characterization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * The below implementation of the CPU clock allows the rate changes of the CPU
 * clock and the corresponding rate changes of the auxiliary clocks of the CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * domain. The platform clock driver provides a clock register configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * for each configurable rate which is then used to program the clock hardware
 * registers to achieve a fast coordinated rate change for all the CPU domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * clocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * On a rate change request for the CPU clock, the rate change is propagated
 * up to the PLL supplying the clock to the CPU domain clock blocks. While the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * CPU domain PLL is reconfigured, the CPU domain clocks are driven using an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * alternate clock source. If required, the alternate clock source is divided
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * down in order to keep the output clock rate within the previous OPP limits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/clk-provider.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include "clk-cpu.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define E4210_SRC_CPU 0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define E4210_STAT_CPU 0x200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define E4210_DIV_CPU0 0x300
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define E4210_DIV_CPU1 0x304
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define E4210_DIV_STAT_CPU0 0x400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define E4210_DIV_STAT_CPU1 0x404
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define E5433_MUX_SEL2 0x008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define E5433_MUX_STAT2 0x208
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define E5433_DIV_CPU0 0x400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define E5433_DIV_CPU1 0x404
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define E5433_DIV_STAT_CPU0 0x500
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define E5433_DIV_STAT_CPU1 0x504
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define E4210_DIV0_RATIO0_MASK 0x7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define E4210_DIV1_HPM_MASK (0x7 << 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define E4210_DIV1_COPY_MASK (0x7 << 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define E4210_MUX_HPM_MASK (1 << 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define E4210_DIV0_ATB_SHIFT 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define E4210_DIV0_ATB_MASK (DIV_MASK << E4210_DIV0_ATB_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define MAX_DIV 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define DIV_MASK 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define DIV_MASK_ALL 0xffffffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define MUX_MASK 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * Helper function to wait until divider(s) have stabilized after the divider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) * value has changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) static void wait_until_divider_stable(void __iomem *div_reg, unsigned long mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) unsigned long timeout = jiffies + msecs_to_jiffies(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) if (!(readl(div_reg) & mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) } while (time_before(jiffies, timeout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) if (!(readl(div_reg) & mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) pr_err("%s: timeout in divider stablization\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * Helper function to wait until mux has stabilized after the mux selection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * value was changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) static void wait_until_mux_stable(void __iomem *mux_reg, u32 mux_pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) unsigned long mux_value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) unsigned long timeout = jiffies + msecs_to_jiffies(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) } while (time_before(jiffies, timeout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) pr_err("%s: re-parenting mux timed-out\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
/* Common round_rate callback usable by all types of CPU clocks. */
static long exynos_cpuclk_round_rate(struct clk_hw *hw,
                        unsigned long drate, unsigned long *prate)
{
        /* Delegate the rounding to the parent clock; armclk mirrors it. */
        *prate = clk_hw_round_rate(clk_hw_get_parent(hw), drate);

        return *prate;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
/* Common recalc_rate callback usable by all types of CPU clocks. */
static unsigned long exynos_cpuclk_recalc_rate(struct clk_hw *hw,
                        unsigned long parent_rate)
{
        /*
         * armclk runs at the parent's rate.  The dividers inside the CPU
         * clock block are only exercised transiently during frequency
         * transitions, so the steady-state output rate is simply the
         * parent rate.
         */
        return parent_rate;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
/* Clock operations shared by every CPU clock variant in this file. */
static const struct clk_ops exynos_cpuclk_clk_ops = {
	.recalc_rate = exynos_cpuclk_recalc_rate,
	.round_rate = exynos_cpuclk_round_rate,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) * Helper function to set the 'safe' dividers for the CPU clock. The parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) * div and mask contain the divider value and the register bit mask of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) * dividers to be programmed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) static void exynos_set_safe_div(void __iomem *base, unsigned long div,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) unsigned long mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) unsigned long div0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) div0 = readl(base + E4210_DIV_CPU0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) div0 = (div0 & ~mask) | (div & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) writel(div0, base + E4210_DIV_CPU0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
/*
 * Handler for the pre-rate change notification from the parent clock
 * (Exynos4210 register layout).
 *
 * Looks up the divider configuration for the new rate, programs 'safe'
 * dividers where needed so armclk never exceeds the previous OPP, and then
 * re-parents the CPU clock mux to the alternate parent (sclk_mpll) for the
 * duration of the PLL reconfiguration.
 *
 * Returns 0 on success, -EINVAL if the new rate has no entry in the
 * configuration table.
 */
static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
	unsigned long div0, div1 = 0, mux_reg;
	unsigned long flags;

	/*
	 * Find the divider values to use for the new rate.  The table is
	 * terminated by an entry with prate == 0; prate appears to be stored
	 * in kHz (compared against new_rate after scaling by 1000).
	 */
	while ((cfg_data->prate * 1000) != ndata->new_rate) {
		if (cfg_data->prate == 0)
			return -EINVAL;
		cfg_data++;
	}

	spin_lock_irqsave(cpuclk->lock, flags);

	/*
	 * For the selected PLL clock frequency, get the pre-defined divider
	 * values. If the clock for sclk_hpm is not sourced from apll, then
	 * the values for DIV_COPY and DIV_HPM dividers need not be set.
	 */
	div0 = cfg_data->div0;
	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
		div1 = cfg_data->div1;
		/* sclk_hpm not on apll: keep the current HPM/COPY dividers */
		if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
			div1 = readl(base + E4210_DIV_CPU1) &
				(E4210_DIV1_HPM_MASK | E4210_DIV1_COPY_MASK);
	}

	/*
	 * If the old parent clock speed is less than the clock speed of
	 * the alternate parent, then it should be ensured that at no point
	 * the armclk speed is more than the old_prate until the dividers are
	 * set. Also workaround the issue of the dividers being set to lower
	 * values before the parent clock speed is set to new lower speed
	 * (this can result in too high speed of armclk output clocks).
	 */
	if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
		unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);

		/* divide the alternate parent down to at most tmp_rate */
		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
		WARN_ON(alt_div >= MAX_DIV);

		if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
			/*
			 * In Exynos4210, ATB clock parent is also mout_core. So
			 * ATB clock also needs to be maintained at safe speed.
			 */
			alt_div |= E4210_DIV0_ATB_MASK;
			alt_div_mask |= E4210_DIV0_ATB_MASK;
		}
		exynos_set_safe_div(base, alt_div, alt_div_mask);
		div0 |= alt_div;
	}

	/* select sclk_mpll as the alternate parent (STAT reports value 2) */
	mux_reg = readl(base + E4210_SRC_CPU);
	writel(mux_reg | (1 << 16), base + E4210_SRC_CPU);
	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 2);

	/* alternate parent is active now. set the dividers */
	writel(div0, base + E4210_DIV_CPU0);
	wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);

	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
		writel(div1, base + E4210_DIV_CPU1);
		wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
				DIV_MASK_ALL);
	}

	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
/*
 * Handler for the post-rate change notification from the parent clock
 * (Exynos4210 register layout).
 *
 * Switches the CPU clock mux back to mout_apll once the PLL has settled at
 * the new rate and clears the 'safe' divider programmed by the pre-rate
 * handler.  On SoCs with the DEBUG_ALT_DIV quirk the ATB divider from the
 * configuration table is restored as well.
 *
 * Returns 0 on success, -EINVAL if the new rate has no entry in the
 * configuration table.
 */
static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long div = 0, div_mask = DIV_MASK;
	unsigned long mux_reg;
	unsigned long flags;

	/*
	 * Find the divider values to use for the new rate; only needed when
	 * the ATB divider must be restored.  The table is terminated by an
	 * entry with prate == 0 (prate scaled by 1000 against new_rate).
	 */
	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
		while ((cfg_data->prate * 1000) != ndata->new_rate) {
			if (cfg_data->prate == 0)
				return -EINVAL;
			cfg_data++;
		}
	}

	spin_lock_irqsave(cpuclk->lock, flags);

	/* re-select mout_apll as the parent (STAT reports value 1) */
	mux_reg = readl(base + E4210_SRC_CPU);
	writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);

	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
		/* restore the configured ATB divider alongside the CPU one */
		div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
		div_mask |= E4210_DIV0_ATB_MASK;
	}

	exynos_set_safe_div(base, div, div_mask);
	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) * Helper function to set the 'safe' dividers for the CPU clock. The parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * div and mask contain the divider value and the register bit mask of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) * dividers to be programmed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) static void exynos5433_set_safe_div(void __iomem *base, unsigned long div,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) unsigned long mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) unsigned long div0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) div0 = readl(base + E5433_DIV_CPU0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) div0 = (div0 & ~mask) | (div & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) writel(div0, base + E5433_DIV_CPU0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
/*
 * Handler for the pre-rate change notification from the parent clock
 * (Exynos5433 register layout).
 *
 * Looks up the divider configuration for the new rate, programs a 'safe'
 * divider if needed so armclk never exceeds the previous OPP, and then
 * re-parents the CPU clock mux to the alternate parent for the duration
 * of the PLL reconfiguration.
 *
 * Returns 0 on success, -EINVAL if the new rate has no entry in the
 * configuration table.
 */
static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
	unsigned long div0, div1 = 0, mux_reg;
	unsigned long flags;

	/*
	 * Find the divider values to use for the new rate.  The table is
	 * terminated by an entry with prate == 0; prate appears to be stored
	 * in kHz (compared against new_rate after scaling by 1000).
	 */
	while ((cfg_data->prate * 1000) != ndata->new_rate) {
		if (cfg_data->prate == 0)
			return -EINVAL;
		cfg_data++;
	}

	spin_lock_irqsave(cpuclk->lock, flags);

	/*
	 * For the selected PLL clock frequency, get the pre-defined divider
	 * values.
	 */
	div0 = cfg_data->div0;
	div1 = cfg_data->div1;

	/*
	 * If the old parent clock speed is less than the clock speed of
	 * the alternate parent, then it should be ensured that at no point
	 * the armclk speed is more than the old_prate until the dividers are
	 * set. Also workaround the issue of the dividers being set to lower
	 * values before the parent clock speed is set to new lower speed
	 * (this can result in too high speed of armclk output clocks).
	 */
	if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
		unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);

		/* divide the alternate parent down to at most tmp_rate */
		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
		WARN_ON(alt_div >= MAX_DIV);

		exynos5433_set_safe_div(base, alt_div, alt_div_mask);
		div0 |= alt_div;
	}

	/* select the alternate parent (STAT reports value 2) */
	mux_reg = readl(base + E5433_MUX_SEL2);
	writel(mux_reg | 1, base + E5433_MUX_SEL2);
	wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 2);

	/* alternate parent is active now. set the dividers */
	writel(div0, base + E5433_DIV_CPU0);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, DIV_MASK_ALL);

	writel(div1, base + E5433_DIV_CPU1);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU1, DIV_MASK_ALL);

	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)
/*
 * Handler for the post-rate change notification from the parent clock
 * (Exynos5433 register layout).
 *
 * Switches the CPU clock mux back to apll once the PLL has settled at the
 * new rate and clears the 'safe' divider programmed by the pre-rate
 * handler.  Always returns 0.
 */
static int exynos5433_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	unsigned long div = 0, div_mask = DIV_MASK;
	unsigned long mux_reg;
	unsigned long flags;

	spin_lock_irqsave(cpuclk->lock, flags);

	/* re-select apll as the parent (STAT reports value 1) */
	mux_reg = readl(base + E5433_MUX_SEL2);
	writel(mux_reg & ~1, base + E5433_MUX_SEL2);
	wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 1);

	/* restore the CPU divider field to its regular (zero) value */
	exynos5433_set_safe_div(base, div, div_mask);
	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) * This notifier function is called for the pre-rate and post-rate change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) * notifications of the parent clock of cpuclk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) static int exynos_cpuclk_notifier_cb(struct notifier_block *nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) unsigned long event, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) struct clk_notifier_data *ndata = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) struct exynos_cpuclk *cpuclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) base = cpuclk->ctrl_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) if (event == PRE_RATE_CHANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) err = exynos_cpuclk_pre_rate_change(ndata, cpuclk, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) else if (event == POST_RATE_CHANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) err = exynos_cpuclk_post_rate_change(ndata, cpuclk, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) return notifier_from_errno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) * This notifier function is called for the pre-rate and post-rate change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) * notifications of the parent clock of cpuclk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) static int exynos5433_cpuclk_notifier_cb(struct notifier_block *nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) unsigned long event, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) struct clk_notifier_data *ndata = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) struct exynos_cpuclk *cpuclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) base = cpuclk->ctrl_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) if (event == PRE_RATE_CHANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) err = exynos5433_cpuclk_pre_rate_change(ndata, cpuclk, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) else if (event == POST_RATE_CHANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) err = exynos5433_cpuclk_post_rate_change(ndata, cpuclk, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) return notifier_from_errno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) /* helper function to register a CPU clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) unsigned int lookup_id, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) const struct clk_hw *parent, const struct clk_hw *alt_parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) unsigned long offset, const struct exynos_cpuclk_cfg_data *cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) unsigned long num_cfgs, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) struct exynos_cpuclk *cpuclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) struct clk_init_data init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) const char *parent_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) if (IS_ERR(parent) || IS_ERR(alt_parent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) pr_err("%s: invalid parent clock(s)\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) if (!cpuclk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) parent_name = clk_hw_get_name(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) init.name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) init.flags = CLK_SET_RATE_PARENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) init.parent_names = &parent_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) init.num_parents = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) init.ops = &exynos_cpuclk_clk_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) cpuclk->alt_parent = alt_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) cpuclk->hw.init = &init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) cpuclk->ctrl_base = ctx->reg_base + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) cpuclk->lock = &ctx->lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) cpuclk->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) if (flags & CLK_CPU_HAS_E5433_REGS_LAYOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) cpuclk->clk_nb.notifier_call = exynos5433_cpuclk_notifier_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) ret = clk_notifier_register(parent->clk, &cpuclk->clk_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) pr_err("%s: failed to register clock notifier for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) __func__, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) goto free_cpuclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) cpuclk->cfg = kmemdup(cfg, sizeof(*cfg) * num_cfgs, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) if (!cpuclk->cfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) goto unregister_clk_nb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) ret = clk_hw_register(NULL, &cpuclk->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) pr_err("%s: could not register cpuclk %s\n", __func__, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) goto free_cpuclk_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) samsung_clk_add_lookup(ctx, &cpuclk->hw, lookup_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) free_cpuclk_data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) kfree(cpuclk->cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) unregister_clk_nb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) clk_notifier_unregister(parent->clk, &cpuclk->clk_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) free_cpuclk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) kfree(cpuclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) }