// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018 NXP.
 * Dong Aisheng <aisheng.dong@nxp.com>
 */

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>

#include "clk.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
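/*
 * struct clk_divider_gate - divider whose zero field value also gates the clock
 * @divider:	the common clk_divider this clock builds upon
 * @cached_val:	divider field value saved while the clock is gated, so the
 *		previously programmed rate can be restored on enable
 */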
struct clk_divider_gate {
	struct clk_divider divider;
	u32 cached_val;
};

static inline struct clk_divider_gate *to_clk_divider_gate(struct clk_hw *hw)
{
	struct clk_divider *div = to_clk_divider(hw);

	return container_of(div, struct clk_divider_gate, divider);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
static unsigned long clk_divider_gate_recalc_rate_ro(struct clk_hw *hw,
						     unsigned long parent_rate)
{
	struct clk_divider *div = to_clk_divider(hw);
	unsigned int val;

	val = readl(div->reg) >> div->shift;
	val &= clk_div_mask(div->width);
	/* a zero divider field means the clock is gated */
	if (!val)
		return 0;

	return divider_recalc_rate(hw, parent_rate, val, div->table,
				   div->flags, div->width);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
static unsigned long clk_divider_gate_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(div->lock, flags);

	if (!clk_hw_is_enabled(hw)) {
		/* the field reads zero while gated, report the cached value */
		val = div_gate->cached_val;
	} else {
		val = readl(div->reg) >> div->shift;
		val &= clk_div_mask(div->width);
	}

	spin_unlock_irqrestore(div->lock, flags);

	if (!val)
		return 0;

	return divider_recalc_rate(hw, parent_rate, val, div->table,
				   div->flags, div->width);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *prate)
{
	return clk_divider_ops.round_rate(hw, rate, prate);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
static int clk_divider_gate_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	int value;
	u32 val;

	value = divider_get_val(rate, parent_rate, div->table,
				div->width, div->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(div->lock, flags);

	if (clk_hw_is_enabled(hw)) {
		val = readl(div->reg);
		val &= ~(clk_div_mask(div->width) << div->shift);
		val |= (u32)value << div->shift;
		writel(val, div->reg);
	} else {
		/* while gated, only cache the value; it is written on enable */
		div_gate->cached_val = value;
	}

	spin_unlock_irqrestore(div->lock, flags);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
static int clk_divider_enable(struct clk_hw *hw)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	u32 val;

	if (!div_gate->cached_val) {
		pr_err("%s: no valid preset rate\n", clk_hw_get_name(hw));
		return -EINVAL;
	}

	spin_lock_irqsave(div->lock, flags);
	/* restore div val */
	val = readl(div->reg);
	val |= div_gate->cached_val << div->shift;
	writel(val, div->reg);

	spin_unlock_irqrestore(div->lock, flags);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
static void clk_divider_disable(struct clk_hw *hw)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(div->lock, flags);

	/* store the current div val */
	val = readl(div->reg) >> div->shift;
	val &= clk_div_mask(div->width);
	div_gate->cached_val = val;
	/* a zero divider value gates the clock */
	writel(0, div->reg);

	spin_unlock_irqrestore(div->lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
static int clk_divider_is_enabled(struct clk_hw *hw)
{
	struct clk_divider *div = to_clk_divider(hw);
	u32 val;

	val = readl(div->reg) >> div->shift;
	val &= clk_div_mask(div->width);

	return val ? 1 : 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
/* read-only divider: the rate can be observed but not changed or gated */
static const struct clk_ops clk_divider_gate_ro_ops = {
	.recalc_rate = clk_divider_gate_recalc_rate_ro,
	.round_rate = clk_divider_round_rate,
};

static const struct clk_ops clk_divider_gate_ops = {
	.recalc_rate = clk_divider_gate_recalc_rate,
	.round_rate = clk_divider_round_rate,
	.set_rate = clk_divider_gate_set_rate,
	.enable = clk_divider_enable,
	.disable = clk_divider_disable,
	.is_enabled = clk_divider_is_enabled,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
/*
 * NOTE: In order to reuse as much code as possible from the common divider,
 * this divider also takes an extra clk_divider_flags argument. However,
 * CLK_DIVIDER_ONE_BASED is always set by default to match the hardware.
 * Apart from that, only the CLK_DIVIDER_READ_ONLY flag may be specified by
 * the caller (an example registration is sketched at the end of this file).
 */
struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name,
				       unsigned long flags, void __iomem *reg,
				       u8 shift, u8 width, u8 clk_divider_flags,
				       const struct clk_div_table *table,
				       spinlock_t *lock)
{
	struct clk_init_data init;
	struct clk_divider_gate *div_gate;
	struct clk_hw *hw;
	u32 val;
	int ret;

	div_gate = kzalloc(sizeof(*div_gate), GFP_KERNEL);
	if (!div_gate)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
		init.ops = &clk_divider_gate_ro_ops;
	else
		init.ops = &clk_divider_gate_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	div_gate->divider.reg = reg;
	div_gate->divider.shift = shift;
	div_gate->divider.width = width;
	div_gate->divider.lock = lock;
	div_gate->divider.table = table;
	div_gate->divider.hw.init = &init;
	div_gate->divider.flags = CLK_DIVIDER_ONE_BASED | clk_divider_flags;
	/* cache the current divider value (zero means the clock is gated) */
	val = readl(reg) >> shift;
	val &= clk_div_mask(width);
	div_gate->cached_val = val;

	hw = &div_gate->divider.hw;
	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(div_gate);
		hw = ERR_PTR(ret);
	}

	return hw;
}
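
/*
 * Example: a minimal registration sketch, not taken from any real clock
 * provider. The base address, register offset, field position/width, clock
 * names and lock below are hypothetical; only the imx_clk_hw_divider_gate()
 * signature comes from this file.
 *
 *	static DEFINE_SPINLOCK(imx_example_lock);
 *
 *	static int imx_example_clk_init(void __iomem *base)
 *	{
 *		struct clk_hw *hw;
 *
 *		hw = imx_clk_hw_divider_gate("lpuart_div", "lpuart_sel",
 *					     CLK_SET_RATE_PARENT, base + 0x10,
 *					     0, 3, 0, NULL, &imx_example_lock);
 *		return PTR_ERR_OR_ZERO(hw);
 *	}
 *
 * Passing CLK_DIVIDER_READ_ONLY as clk_divider_flags would select the
 * read-only ops instead, and a NULL table lets the common divider treat the
 * field as a plain one-based divider.
 */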