// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * This file includes utility functions to register clocks to the common
 * clock framework for Samsung platforms.
 */

#include <linux/slab.h>
#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>

#include "clk.h"

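/* list of register caches used to save/restore clock registers across sleep */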
static LIST_HEAD(clock_reg_cache_list);

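/* read the current value of each listed clock register into the dump array */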
void samsung_clk_save(void __iomem *base,
		      struct samsung_clk_reg_dump *rd,
		      unsigned int num_regs)
{
	for (; num_regs > 0; --num_regs, ++rd)
		rd->value = readl(base + rd->offset);
}

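/* write the previously saved register values back, e.g. on resume from sleep */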
void samsung_clk_restore(void __iomem *base,
			 const struct samsung_clk_reg_dump *rd,
			 unsigned int num_regs)
{
	for (; num_regs > 0; --num_regs, ++rd)
		writel(rd->value, base + rd->offset);
}

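/* allocate a register dump array and fill in the register offsets */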
struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump(
						const unsigned long *rdump,
						unsigned long nr_rdump)
{
	struct samsung_clk_reg_dump *rd;
	unsigned int i;

	rd = kcalloc(nr_rdump, sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	for (i = 0; i < nr_rdump; ++i)
		rd[i].offset = rdump[i];

	return rd;
}

/* setup the essentials required to support clock lookup using ccf */
struct samsung_clk_provider *__init samsung_clk_init(struct device_node *np,
			void __iomem *base, unsigned long nr_clks)
{
	struct samsung_clk_provider *ctx;
	int i;

	ctx = kzalloc(struct_size(ctx, clk_data.hws, nr_clks), GFP_KERNEL);
	if (!ctx)
		panic("could not allocate clock provider context.\n");

	for (i = 0; i < nr_clks; ++i)
		ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);

	ctx->reg_base = base;
	ctx->clk_data.num = nr_clks;
	spin_lock_init(&ctx->lock);

	return ctx;
}

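/* register the provider with the common clock framework for DT based lookup */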
void __init samsung_clk_of_add_provider(struct device_node *np,
					struct samsung_clk_provider *ctx)
{
	if (np) {
		if (of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
					&ctx->clk_data))
			panic("could not register clk provider\n");
	}
}

/* add a clock instance to the clock lookup table used for dt based lookup */
void samsung_clk_add_lookup(struct samsung_clk_provider *ctx,
			    struct clk_hw *clk_hw, unsigned int id)
{
	if (id)
		ctx->clk_data.hws[id] = clk_hw;
}

/* register a list of aliases */
void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx,
				       const struct samsung_clock_alias *list,
				       unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx, ret;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		if (!list->id) {
			pr_err("%s: clock id missing for index %d\n", __func__,
				idx);
			continue;
		}

		clk_hw = ctx->clk_data.hws[list->id];
		if (!clk_hw) {
			pr_err("%s: failed to find clock %d\n", __func__,
				list->id);
			continue;
		}

		ret = clk_hw_register_clkdev(clk_hw, list->alias,
					     list->dev_name);
		if (ret)
			pr_err("%s: failed to register lookup %s\n",
				__func__, list->alias);
	}
}

/* register a list of fixed clocks */
void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx,
		const struct samsung_fixed_rate_clock *list,
		unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx, ret;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		clk_hw = clk_hw_register_fixed_rate(ctx->dev, list->name,
			list->parent_name, list->flags, list->fixed_rate);
		if (IS_ERR(clk_hw)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		samsung_clk_add_lookup(ctx, clk_hw, list->id);

		/*
		 * Unconditionally add a clock lookup for the fixed rate clocks.
		 * There are not many of these on any Samsung platform.
		 */
		ret = clk_hw_register_clkdev(clk_hw, list->name, NULL);
		if (ret)
			pr_err("%s: failed to register clock lookup for %s",
				__func__, list->name);
	}
}

/* register a list of fixed factor clocks */
void __init samsung_clk_register_fixed_factor(struct samsung_clk_provider *ctx,
		const struct samsung_fixed_factor_clock *list, unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		clk_hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
			list->parent_name, list->flags, list->mult, list->div);
		if (IS_ERR(clk_hw)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		samsung_clk_add_lookup(ctx, clk_hw, list->id);
	}
}

/* register a list of mux clocks */
void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
				     const struct samsung_mux_clock *list,
				     unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		clk_hw = clk_hw_register_mux(ctx->dev, list->name,
			list->parent_names, list->num_parents, list->flags,
			ctx->reg_base + list->offset,
			list->shift, list->width, list->mux_flags, &ctx->lock);
		if (IS_ERR(clk_hw)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		samsung_clk_add_lookup(ctx, clk_hw, list->id);
	}
}

/* register a list of div clocks */
void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
				     const struct samsung_div_clock *list,
				     unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		if (list->table)
			clk_hw = clk_hw_register_divider_table(ctx->dev,
				list->name, list->parent_name, list->flags,
				ctx->reg_base + list->offset,
				list->shift, list->width, list->div_flags,
				list->table, &ctx->lock);
		else
			clk_hw = clk_hw_register_divider(ctx->dev, list->name,
				list->parent_name, list->flags,
				ctx->reg_base + list->offset, list->shift,
				list->width, list->div_flags, &ctx->lock);
		if (IS_ERR(clk_hw)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		samsung_clk_add_lookup(ctx, clk_hw, list->id);
	}
}

/* register a list of gate clocks */
void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
				      const struct samsung_gate_clock *list,
				      unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		clk_hw = clk_hw_register_gate(ctx->dev, list->name, list->parent_name,
				list->flags, ctx->reg_base + list->offset,
				list->bit_idx, list->gate_flags, &ctx->lock);
		if (IS_ERR(clk_hw)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		samsung_clk_add_lookup(ctx, clk_hw, list->id);
	}
}

/*
 * obtain the clock speed of all external fixed clock sources from device
 * tree and register them
 */
void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
		struct samsung_fixed_rate_clock *fixed_rate_clk,
		unsigned int nr_fixed_rate_clk,
		const struct of_device_id *clk_matches)
{
	const struct of_device_id *match;
	struct device_node *clk_np;
	u32 freq;

	for_each_matching_node_and_match(clk_np, clk_matches, &match) {
		if (of_property_read_u32(clk_np, "clock-frequency", &freq))
			continue;
		fixed_rate_clk[(unsigned long)match->data].fixed_rate = freq;
	}
	samsung_clk_register_fixed_rate(ctx, fixed_rate_clk, nr_fixed_rate_clk);
}

/* utility function to get the rate of a specified clock */
unsigned long _get_rate(const char *clk_name)
{
	struct clk *clk;

	clk = __clk_lookup(clk_name);
	if (!clk) {
		pr_err("%s: could not find clock %s\n", __func__, clk_name);
		return 0;
	}

	return clk_get_rate(clk);
}

#ifdef CONFIG_PM_SLEEP
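/* on suspend, save the listed clock registers and apply the suspend values */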
static int samsung_clk_suspend(void)
{
	struct samsung_clock_reg_cache *reg_cache;

	list_for_each_entry(reg_cache, &clock_reg_cache_list, node) {
		samsung_clk_save(reg_cache->reg_base, reg_cache->rdump,
				reg_cache->rd_num);
		samsung_clk_restore(reg_cache->reg_base, reg_cache->rsuspend,
				reg_cache->rsuspend_num);
	}
	return 0;
}

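/* on resume, restore the clock registers saved at suspend time */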
static void samsung_clk_resume(void)
{
	struct samsung_clock_reg_cache *reg_cache;

	list_for_each_entry(reg_cache, &clock_reg_cache_list, node)
		samsung_clk_restore(reg_cache->reg_base, reg_cache->rdump,
				reg_cache->rd_num);
}

static struct syscore_ops samsung_clk_syscore_ops = {
	.suspend = samsung_clk_suspend,
	.resume = samsung_clk_resume,
};

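/*
 * set up a register cache for the given register list so that the clock
 * registers are saved across suspend and restored on resume
 */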
void samsung_clk_extended_sleep_init(void __iomem *reg_base,
			const unsigned long *rdump,
			unsigned long nr_rdump,
			const struct samsung_clk_reg_dump *rsuspend,
			unsigned long nr_rsuspend)
{
	struct samsung_clock_reg_cache *reg_cache;

	reg_cache = kzalloc(sizeof(struct samsung_clock_reg_cache),
			GFP_KERNEL);
	if (!reg_cache)
		panic("could not allocate register reg_cache.\n");
	reg_cache->rdump = samsung_clk_alloc_reg_dump(rdump, nr_rdump);

	if (!reg_cache->rdump)
		panic("could not allocate register dump storage.\n");

	if (list_empty(&clock_reg_cache_list))
		register_syscore_ops(&samsung_clk_syscore_ops);

	reg_cache->reg_base = reg_base;
	reg_cache->rd_num = nr_rdump;
	reg_cache->rsuspend = rsuspend;
	reg_cache->rsuspend_num = nr_rsuspend;
	list_add_tail(&reg_cache->node, &clock_reg_cache_list);
}
#endif

/*
 * Common function which registers plls, muxes, dividers and gates
 * for each CMU. It also adds the CMU register list to the register cache.
 */
struct samsung_clk_provider * __init samsung_cmu_register_one(
			struct device_node *np,
			const struct samsung_cmu_info *cmu)
{
	void __iomem *reg_base;
	struct samsung_clk_provider *ctx;

	reg_base = of_iomap(np, 0);
	if (!reg_base) {
		panic("%s: failed to map registers\n", __func__);
		return NULL;
	}

	ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);

	if (cmu->pll_clks)
		samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
			reg_base);
	if (cmu->mux_clks)
		samsung_clk_register_mux(ctx, cmu->mux_clks,
			cmu->nr_mux_clks);
	if (cmu->div_clks)
		samsung_clk_register_div(ctx, cmu->div_clks, cmu->nr_div_clks);
	if (cmu->gate_clks)
		samsung_clk_register_gate(ctx, cmu->gate_clks,
			cmu->nr_gate_clks);
	if (cmu->fixed_clks)
		samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks,
			cmu->nr_fixed_clks);
	if (cmu->fixed_factor_clks)
		samsung_clk_register_fixed_factor(ctx, cmu->fixed_factor_clks,
			cmu->nr_fixed_factor_clks);
	if (cmu->clk_regs)
		samsung_clk_extended_sleep_init(reg_base,
			cmu->clk_regs, cmu->nr_clk_regs,
			cmu->suspend_regs, cmu->nr_suspend_regs);

	samsung_clk_of_add_provider(np, ctx);

	return ctx;
}