^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2020 Intel Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Zhu YiXin <yixin.zhu@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Rahul Tanwar <rahul.tanwar@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/clk-provider.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include "clk-cgu.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
/*
 * Gate registers come as a triplet: status at +0x0, write-1-to-enable at
 * +0x4 and write-1-to-disable at +0x8 relative to the gate base offset.
 */
#define GATE_HW_REG_STAT(reg) ((reg) + 0x0)
#define GATE_HW_REG_EN(reg) ((reg) + 0x4)
#define GATE_HW_REG_DIS(reg) ((reg) + 0x8)
/* Max value of one dual-divider factor field (see lgm_clk_get_ddiv_val). */
#define MAX_DDIV_REG 8
/* Max combined dual-divider ratio — presumably MAX_DDIV_REG squared. */
#define MAX_DIVIDER_VAL 64

/* Recover the driver-private wrappers from the embedded struct clk_hw. */
#define to_lgm_clk_mux(_hw) container_of(_hw, struct lgm_clk_mux, hw)
#define to_lgm_clk_divider(_hw) container_of(_hw, struct lgm_clk_divider, hw)
#define to_lgm_clk_gate(_hw) container_of(_hw, struct lgm_clk_gate, hw)
#define to_lgm_clk_ddiv(_hw) container_of(_hw, struct lgm_clk_ddiv, hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) const struct lgm_clk_branch *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) spin_lock_irqsave(&ctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) list->div_width, list->div_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) spin_unlock_irqrestore(&ctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) return clk_hw_register_fixed_rate(NULL, list->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) list->parent_data[0].name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) list->flags, list->mux_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) spin_lock_irqsave(&mux->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) if (mux->flags & MUX_CLK_SW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) val = mux->reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) mux->width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) spin_unlock_irqrestore(&mux->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) return clk_mux_val_to_index(hw, NULL, mux->flags, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) val = clk_mux_index_to_val(NULL, mux->flags, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) spin_lock_irqsave(&mux->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) if (mux->flags & MUX_CLK_SW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) mux->reg = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) mux->width, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) spin_unlock_irqrestore(&mux->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) static int lgm_clk_mux_determine_rate(struct clk_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) struct clk_rate_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) return clk_mux_determine_rate_flags(hw, req, mux->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
/* clk_ops for CLK_TYPE_MUX branches. */
static const struct clk_ops lgm_clk_mux_ops = {
	.get_parent = lgm_clk_mux_get_parent,
	.set_parent = lgm_clk_mux_set_parent,
	.determine_rate = lgm_clk_mux_determine_rate,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) static struct clk_hw *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) lgm_clk_register_mux(struct lgm_clk_provider *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) const struct lgm_clk_branch *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) unsigned long flags, cflags = list->mux_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) struct device *dev = ctx->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) u8 shift = list->mux_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) u8 width = list->mux_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) struct clk_init_data init = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) struct lgm_clk_mux *mux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) u32 reg = list->mux_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) struct clk_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) if (!mux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) init.name = list->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) init.ops = &lgm_clk_mux_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) init.flags = list->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) init.parent_data = list->parent_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) init.num_parents = list->num_parents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) mux->membase = ctx->membase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) mux->lock = ctx->lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) mux->reg = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) mux->shift = shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) mux->width = width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) mux->flags = cflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) mux->hw.init = &init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) hw = &mux->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) ret = devm_clk_hw_register(dev, hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) if (cflags & CLOCK_FLAG_VAL_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) spin_lock_irqsave(&mux->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) spin_unlock_irqrestore(&mux->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) return hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) static unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) unsigned int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) spin_lock_irqsave(÷r->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) val = lgm_get_clk_val(divider->membase, divider->reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) divider->shift, divider->width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) spin_unlock_irqrestore(÷r->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) return divider_recalc_rate(hw, parent_rate, val, divider->table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) divider->flags, divider->width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) static long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) unsigned long *prate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) return divider_round_rate(hw, rate, prate, divider->table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) divider->width, divider->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) unsigned long prate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) int value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) value = divider_get_val(rate, prate, divider->table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) divider->width, divider->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) if (value < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) return value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) spin_lock_irqsave(÷r->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) lgm_set_clk_val(divider->membase, divider->reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) divider->shift, divider->width, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) spin_unlock_irqrestore(÷r->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) spin_lock_irqsave(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) div->width_gate, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) spin_unlock_irqrestore(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
/* Ungate the divider output. */
static int lgm_clk_divider_enable(struct clk_hw *hw)
{
	return lgm_clk_divider_enable_disable(hw, 1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
/* Gate the divider output (return value intentionally discarded). */
static void lgm_clk_divider_disable(struct clk_hw *hw)
{
	lgm_clk_divider_enable_disable(hw, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
/* clk_ops for CLK_TYPE_DIVIDER branches (divider with built-in gate). */
static const struct clk_ops lgm_clk_divider_ops = {
	.recalc_rate = lgm_clk_divider_recalc_rate,
	.round_rate = lgm_clk_divider_round_rate,
	.set_rate = lgm_clk_divider_set_rate,
	.enable = lgm_clk_divider_enable,
	.disable = lgm_clk_divider_disable,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) static struct clk_hw *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) lgm_clk_register_divider(struct lgm_clk_provider *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) const struct lgm_clk_branch *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) unsigned long flags, cflags = list->div_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) struct device *dev = ctx->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) struct lgm_clk_divider *div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) struct clk_init_data init = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) u8 shift = list->div_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) u8 width = list->div_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) u8 shift_gate = list->div_shift_gate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) u8 width_gate = list->div_width_gate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) u32 reg = list->div_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) struct clk_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) if (!div)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) init.name = list->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) init.ops = &lgm_clk_divider_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) init.flags = list->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) init.parent_data = list->parent_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) init.num_parents = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) div->membase = ctx->membase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) div->lock = ctx->lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) div->reg = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) div->shift = shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) div->width = width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) div->shift_gate = shift_gate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) div->width_gate = width_gate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) div->flags = cflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) div->table = list->div_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) div->hw.init = &init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) hw = &div->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) ret = devm_clk_hw_register(dev, hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) if (cflags & CLOCK_FLAG_VAL_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) spin_lock_irqsave(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) spin_unlock_irqrestore(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) return hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) static struct clk_hw *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) const struct lgm_clk_branch *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) struct clk_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) list->parent_data[0].name, list->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) list->mult, list->div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) if (IS_ERR(hw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) return ERR_CAST(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) spin_lock_irqsave(&ctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) list->div_width, list->div_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) spin_unlock_irqrestore(&ctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) return hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) static int lgm_clk_gate_enable(struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) unsigned int reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) spin_lock_irqsave(&gate->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) reg = GATE_HW_REG_EN(gate->reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) spin_unlock_irqrestore(&gate->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) static void lgm_clk_gate_disable(struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) unsigned int reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) spin_lock_irqsave(&gate->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) reg = GATE_HW_REG_DIS(gate->reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) spin_unlock_irqrestore(&gate->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) unsigned int reg, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) spin_lock_irqsave(&gate->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) reg = GATE_HW_REG_STAT(gate->reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) spin_unlock_irqrestore(&gate->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
/* clk_ops for CLK_TYPE_GATE branches (set/clear/status register triplet). */
static const struct clk_ops lgm_clk_gate_ops = {
	.enable = lgm_clk_gate_enable,
	.disable = lgm_clk_gate_disable,
	.is_enabled = lgm_clk_gate_is_enabled,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) static struct clk_hw *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) lgm_clk_register_gate(struct lgm_clk_provider *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) const struct lgm_clk_branch *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) unsigned long flags, cflags = list->gate_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) const char *pname = list->parent_data[0].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) struct device *dev = ctx->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) u8 shift = list->gate_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) struct clk_init_data init = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) struct lgm_clk_gate *gate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) u32 reg = list->gate_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) struct clk_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) if (!gate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) init.name = list->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) init.ops = &lgm_clk_gate_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) init.flags = list->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) init.parent_names = pname ? &pname : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) init.num_parents = pname ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) gate->membase = ctx->membase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) gate->lock = ctx->lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) gate->reg = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) gate->shift = shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) gate->flags = cflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) gate->hw.init = &init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) hw = &gate->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) ret = devm_clk_hw_register(dev, hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) if (cflags & CLOCK_FLAG_VAL_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) spin_lock_irqsave(&gate->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) spin_unlock_irqrestore(&gate->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) return hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) const struct lgm_clk_branch *list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) unsigned int nr_clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) struct clk_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) unsigned int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) for (idx = 0; idx < nr_clk; idx++, list++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) switch (list->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) case CLK_TYPE_FIXED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) hw = lgm_clk_register_fixed(ctx, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) case CLK_TYPE_MUX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) hw = lgm_clk_register_mux(ctx, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) case CLK_TYPE_DIVIDER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) hw = lgm_clk_register_divider(ctx, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) case CLK_TYPE_FIXED_FACTOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) hw = lgm_clk_register_fixed_factor(ctx, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) case CLK_TYPE_GATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) hw = lgm_clk_register_gate(ctx, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) dev_err(ctx->dev, "invalid clk type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) if (IS_ERR(hw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) dev_err(ctx->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) "register clk: %s, type: %u failed!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) list->name, list->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) ctx->clk_data.hws[list->id] = hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) static unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) unsigned int div0, div1, exdiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) u64 prate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) ddiv->shift0, ddiv->width0) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) ddiv->shift1, ddiv->width1) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) ddiv->shift2, ddiv->width2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) prate = (u64)parent_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) do_div(prate, div0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) do_div(prate, div1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) if (exdiv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) do_div(prate, ddiv->div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) prate *= ddiv->mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) return prate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) static int lgm_clk_ddiv_enable(struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) spin_lock_irqsave(&ddiv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) ddiv->width_gate, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) spin_unlock_irqrestore(&ddiv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) static void lgm_clk_ddiv_disable(struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) spin_lock_irqsave(&ddiv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) ddiv->width_gate, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) spin_unlock_irqrestore(&ddiv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) lgm_clk_get_ddiv_val(u32 div, u32 *ddiv1, u32 *ddiv2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) u32 idx, temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) *ddiv1 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) *ddiv2 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) if (div > MAX_DIVIDER_VAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) div = MAX_DIVIDER_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) if (div > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) for (idx = 2; idx <= MAX_DDIV_REG; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) temp = DIV_ROUND_UP_ULL((u64)div, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) if (div % idx == 0 && temp <= MAX_DDIV_REG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) if (idx > MAX_DDIV_REG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) *ddiv1 = temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) *ddiv2 = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) unsigned long prate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) u32 div, ddiv1, ddiv2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) spin_lock_irqsave(&ddiv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) div = div * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) if (div <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) spin_unlock_irqrestore(&ddiv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) spin_unlock_irqrestore(&ddiv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) ddiv1 - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) ddiv2 - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) spin_unlock_irqrestore(&ddiv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) static long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) unsigned long *prate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) u32 div, ddiv1, ddiv2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) u64 rate64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) /* if predivide bit is enabled, modify div by factor of 2.5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) spin_lock_irqsave(&ddiv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) div = div * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) spin_unlock_irqrestore(&ddiv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) if (div <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) return *prate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) rate64 = *prate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) do_div(rate64, ddiv1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) do_div(rate64, ddiv2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) /* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) spin_lock_irqsave(&ddiv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) rate64 = rate64 * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) spin_unlock_irqrestore(&ddiv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) return rate64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
/* clk_ops for the dual-divider (ddiv) clock type: gating plus rate control. */
static const struct clk_ops lgm_clk_ddiv_ops = {
	.recalc_rate = lgm_clk_ddiv_recalc_rate,
	.enable	= lgm_clk_ddiv_enable,
	.disable = lgm_clk_ddiv_disable,
	.set_rate = lgm_clk_ddiv_set_rate,
	.round_rate = lgm_clk_ddiv_round_rate,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) const struct lgm_clk_ddiv_data *list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) unsigned int nr_clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) struct device *dev = ctx->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) struct clk_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) unsigned int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) for (idx = 0; idx < nr_clk; idx++, list++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) struct clk_init_data init = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) struct lgm_clk_ddiv *ddiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) if (!ddiv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) init.name = list->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) init.ops = &lgm_clk_ddiv_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) init.flags = list->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) init.parent_data = list->parent_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) init.num_parents = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) ddiv->membase = ctx->membase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) ddiv->lock = ctx->lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) ddiv->reg = list->reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) ddiv->shift0 = list->shift0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) ddiv->width0 = list->width0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) ddiv->shift1 = list->shift1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) ddiv->width1 = list->width1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) ddiv->shift_gate = list->shift_gate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) ddiv->width_gate = list->width_gate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) ddiv->shift2 = list->ex_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) ddiv->width2 = list->ex_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) ddiv->flags = list->div_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) ddiv->mult = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) ddiv->div = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) ddiv->hw.init = &init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) hw = &ddiv->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) ret = devm_clk_hw_register(dev, hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) dev_err(dev, "register clk: %s failed!\n", list->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) ctx->clk_data.hws[list->id] = hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) }