// SPDX-License-Identifier: GPL-2.0
/*
 * Marvell MVEBU CPU clock handling.
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/mvebu-pmsu.h>
#include <asm/smp_plat.h>

#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET	0x0
#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL	0xff
#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT	8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET	0x8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT	16
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET	0xC
#define SYS_CTRL_CLK_DIVIDER_MASK		0x3F

/* The per-CPU DFS ratio lives in bits [21:16] of the PMU DFS register */
#define PMU_DFS_RATIO_SHIFT	16
#define PMU_DFS_RATIO_MASK	0x3F

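/*
 * Armada XP SoCs have at most four CPU cores, which bounds the
 * fixed-size onecell clock provider registered below.
 */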
#define MAX_CPU		4
struct cpu_clk {
	struct clk_hw hw;
	int cpu;
	const char *clk_name;
	const char *parent_name;
	void __iomem *reg_base;
	void __iomem *pmu_dfs;
};

static struct clk **clks;

static struct clk_onecell_data clk_data;

#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)

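/*
 * The divider value register holds one divider per CPU, one byte per
 * core, of which only the low six bits (SYS_CTRL_CLK_DIVIDER_MASK)
 * are significant. The CPU rate is the parent (PLL) rate divided by
 * that field.
 */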
static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
					 unsigned long parent_rate)
{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
	div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
	return parent_rate / div;
}

static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long *parent_rate)
{
	/* Valid ratios are 1:1, 1:2 and 1:3 */
	u32 div;

	div = *parent_rate / rate;
	if (div == 0)
		div = 1;
	else if (div > 3)
		div = 3;

	return *parent_rate / div;
}

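/*
 * Set-rate path used while the CPU clock is not running: the divider
 * can be written directly. Program the new divider, arm the per-CPU
 * "smooth reload" bit (20 + cpu) together with the global reload
 * trigger (bit 24), wait for the switch to settle, then clear both.
 */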
static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
				unsigned long parent_rate)
{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;
	u32 reload_mask;

	div = parent_rate / rate;
	reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
		& (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
		| (div << (cpuclk->cpu * 8));
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
	/* Set clock divider reload smooth bit mask */
	reload_mask = 1 << (20 + cpuclk->cpu);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | reload_mask;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Now trigger the clock update */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | 1 << 24;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Wait for clocks to settle down then clear reload request */
	udelay(1000);
	reg &= ~(reload_mask | 1 << 24);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	udelay(1000);

	return 0;
}

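/*
 * Set-rate path used while the CPU is running: the divider cannot
 * simply be rewritten, so the change goes through the PMU dynamic
 * frequency scaling (DFS) block instead. A request for exactly twice
 * the current rate halves the fabric (NBCLK) ratio as the target
 * divider; any other request targets the fabric ratio itself. The
 * switch proper is delegated to mvebu_pmsu_dfs_request().
 */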
static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long parent_rate)
{
	u32 reg;
	unsigned long fabric_div, target_div, cur_rate;
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);

	/*
	 * PMU DFS registers are not mapped: the Device Tree does not
	 * describe them. The frequency cannot be changed dynamically.
	 */
	if (!cpuclk->pmu_dfs)
		return -ENODEV;

	cur_rate = clk_hw_get_rate(hwclk);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
	fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
		SYS_CTRL_CLK_DIVIDER_MASK;

	/* Frequency is going up */
	if (rate == 2 * cur_rate)
		target_div = fabric_div / 2;
	/* Frequency is going down */
	else
		target_div = fabric_div;

	if (target_div == 0)
		target_div = 1;

	reg = readl(cpuclk->pmu_dfs);
	reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
	reg |= (target_div << PMU_DFS_RATIO_SHIFT);
	writel(reg, cpuclk->pmu_dfs);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
		SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	return mvebu_pmsu_dfs_request(cpuclk->cpu);
}

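/*
 * The divider can only be written directly while the CPU clock is
 * inactive; a live CPU must go through the PMU DFS handshake.
 */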
static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
			    unsigned long parent_rate)
{
	if (__clk_is_enabled(hwclk->clk))
		return clk_cpu_on_set_rate(hwclk, rate, parent_rate);
	else
		return clk_cpu_off_set_rate(hwclk, rate, parent_rate);
}

static const struct clk_ops cpu_ops = {
	.recalc_rate = clk_cpu_recalc_rate,
	.round_rate = clk_cpu_round_rate,
	.set_rate = clk_cpu_set_rate,
};

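/*
 * Register one CPU clock per /cpus node, exposed through a onecell
 * provider indexed by CPU number. The expected Device Tree shape is
 * roughly the following (illustrative sketch based on the
 * marvell,armada-xp-cpu-clock binding; the first reg window is the
 * clock complex, the optional second one the PMU DFS registers):
 *
 *	cpuclk: clock-complex@18700 {
 *		#clock-cells = <1>;
 *		compatible = "marvell,armada-xp-cpu-clock";
 *		reg = <0x18700 0xa0>, <0x1c054 0x10>;
 *		clocks = <&coreclk 1>;
 *	};
 */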
static void __init of_cpu_clk_setup(struct device_node *node)
{
	struct cpu_clk *cpuclk;
	void __iomem *clock_complex_base = of_iomap(node, 0);
	void __iomem *pmu_dfs_base = of_iomap(node, 1);
	int ncpus = 0;
	struct device_node *dn;

	if (clock_complex_base == NULL) {
		pr_err("%s: clock-complex base register not set\n",
			__func__);
		return;
	}

	if (pmu_dfs_base == NULL)
		pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
			__func__);

	for_each_of_cpu_node(dn)
		ncpus++;

	cpuclk = kcalloc(ncpus, sizeof(*cpuclk), GFP_KERNEL);
	if (WARN_ON(!cpuclk))
		goto cpuclk_out;

	clks = kcalloc(ncpus, sizeof(*clks), GFP_KERNEL);
	if (WARN_ON(!clks))
		goto clks_out;

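	/*
	 * One clock per CPU node. The node's "reg" property serves
	 * both as the CPU number and as the index into clks[] that
	 * the onecell getter hands out.
	 */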
	for_each_of_cpu_node(dn) {
		struct clk_init_data init;
		struct clk *clk;
		char *clk_name = kzalloc(5, GFP_KERNEL); /* "cpuN" + NUL */
		int cpu, err;

		if (WARN_ON(!clk_name))
			goto bail_out;

		err = of_property_read_u32(dn, "reg", &cpu);
		if (WARN_ON(err))
			goto bail_out;

		sprintf(clk_name, "cpu%d", cpu);

		cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
		cpuclk[cpu].clk_name = clk_name;
		cpuclk[cpu].cpu = cpu;
		cpuclk[cpu].reg_base = clock_complex_base;
		if (pmu_dfs_base)
			cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
		cpuclk[cpu].hw.init = &init;

		init.name = cpuclk[cpu].clk_name;
		init.ops = &cpu_ops;
		init.flags = 0;
		init.parent_names = &cpuclk[cpu].parent_name;
		init.num_parents = 1;

		clk = clk_register(NULL, &cpuclk[cpu].hw);
		if (WARN_ON(IS_ERR(clk)))
			goto bail_out;
		clks[cpu] = clk;
	}
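	/*
	 * Note: the provider advertises the fixed MAX_CPU cells rather
	 * than ncpus; the Device Tree is assumed to reference only
	 * cells for CPUs that actually exist.
	 */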
	clk_data.clk_num = MAX_CPU;
	clk_data.clks = clks;
	of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);

	return;
bail_out:
	kfree(clks);
	while (ncpus--)
		kfree(cpuclk[ncpus].clk_name);
clks_out:
	kfree(cpuclk);
cpuclk_out:
	iounmap(clock_complex_base);
}

CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
	       of_cpu_clk_setup);

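/*
 * The 98DX3236 variant registers only a dummy provider:
 * of_clk_src_simple_get() hands back the NULL data pointer, which the
 * clk framework treats as a valid no-op clock, so consumer lookups
 * still succeed even though the CPU clock is not software-controlled
 * here.
 */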
static void __init of_mv98dx3236_cpu_clk_setup(struct device_node *node)
{
	of_clk_add_provider(node, of_clk_src_simple_get, NULL);
}

CLK_OF_DECLARE(mv98dx3236_cpu_clock, "marvell,mv98dx3236-cpu-clock",
	       of_mv98dx3236_cpu_clk_setup);