^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2018 Fuzhou Rockchip Electronics Co., Ltd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/clk-provider.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include "clk.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
/* Bitmask covering a divider register field of the given bit width. */
#define div_mask(width) ((1 << (width)) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) static bool _is_best_half_div(unsigned long rate, unsigned long now,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) unsigned long best, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) if (flags & CLK_DIVIDER_ROUND_CLOSEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) return abs(rate - now) <= abs(rate - best);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) return now <= rate && now >= best;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) unsigned long parent_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) struct clk_divider *divider = to_clk_divider(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) unsigned int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) val = readl(divider->reg) >> divider->shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) val &= div_mask(divider->width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) val = val * 2 + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) return DIV_ROUND_UP_ULL(((u64)parent_rate * 2), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
/*
 * Find the register value giving the rate closest to @rate (per @flags).
 *
 * The hardware divisor is (i * 2 + 3) / 2 for register value i, so the
 * achieved rate is parent_rate * 2 / (i * 2 + 3).  When the clock may
 * re-rate its parent (CLK_SET_RATE_PARENT), every candidate i is tried
 * against the best parent rate the upstream clock can deliver and
 * *best_parent_rate is updated to match the winner.  Otherwise the
 * parent rate is taken as fixed and the value is computed directly.
 *
 * Returns the chosen register value (0 .. div_mask(width)).
 */
static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
				    unsigned long *best_parent_rate, u8 width,
				    unsigned long flags)
{
	unsigned int i, bestdiv = 0;
	unsigned long parent_rate, best = 0, now, maxdiv;
	bool is_bestdiv = false;

	/* Guard against division by zero below. */
	if (!rate)
		rate = 1;

	maxdiv = div_mask(width);

	if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
		parent_rate = *best_parent_rate;
		/* Ideal doubled divisor: ceil(parent * 2 / rate)... */
		bestdiv = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
		/* ...then invert i * 2 + 3; anything below 1.5 maps to i = 0. */
		if (bestdiv < 3)
			bestdiv = 0;
		else
			bestdiv = DIV_ROUND_UP(bestdiv - 3, 2);
		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
		return bestdiv;
	}

	/*
	 * The maximum divider we can use without overflowing
	 * unsigned long in rate * i below
	 */
	maxdiv = min(ULONG_MAX / rate, maxdiv);

	for (i = 0; i <= maxdiv; i++) {
		/* Ask the parent for the rate nearest rate * (i * 2 + 3) / 2. */
		parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
						((u64)rate * (i * 2 + 3)) / 2);
		now = DIV_ROUND_UP_ULL(((u64)parent_rate * 2),
				       (i * 2 + 3));

		if (_is_best_half_div(rate, now, best, flags)) {
			is_bestdiv = true;
			bestdiv = i;
			best = now;
			*best_parent_rate = parent_rate;
		}
	}

	/* No candidate fit: use the maximum divisor and the slowest parent. */
	if (!is_bestdiv) {
		bestdiv = div_mask(width);
		*best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
	}

	return bestdiv;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) static long clk_half_divider_round_rate(struct clk_hw *hw, unsigned long rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) unsigned long *prate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) struct clk_divider *divider = to_clk_divider(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) int div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) div = clk_half_divider_bestdiv(hw, rate, prate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) divider->width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) divider->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) return DIV_ROUND_UP_ULL(((u64)*prate * 2), div * 2 + 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
/*
 * clk_ops set_rate: program the divider field so that
 * rate = parent_rate * 2 / (value * 2 + 3).
 *
 * NOTE(review): unlike clk_half_divider_bestdiv(), there is no guard for
 * the first division yielding < 3, in which case "value - 3" underflows
 * and is then clamped to the field maximum.  Presumably harmless because
 * @rate comes from round_rate() and thus never exceeds 2/3 of the parent
 * rate — confirm against callers.
 */
static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int value;
	unsigned long flags = 0;
	u32 val;

	/* Invert recalc_rate(): value = ceil((ceil(parent * 2 / rate) - 3) / 2). */
	value = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
	value = DIV_ROUND_UP(value - 3, 2);
	/* Clamp to what the register field can hold. */
	value = min_t(unsigned int, value, div_mask(divider->width));

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);
	else
		__acquire(divider->lock); /* sparse annotation only */

	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
		/* Write-enable mask in the upper 16 bits; no read needed. */
		val = div_mask(divider->width) << (divider->shift + 16);
	} else {
		/* Read-modify-write: clear only this divider's field. */
		val = readl(divider->reg);
		val &= ~(div_mask(divider->width) << divider->shift);
	}
	val |= value << divider->shift;
	writel(val, divider->reg);

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);
	else
		__release(divider->lock);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
/* Divider-only ops; muxing and gating are composed separately below. */
static const struct clk_ops clk_half_divider_ops = {
	.recalc_rate = clk_half_divider_recalc_rate,
	.round_rate = clk_half_divider_round_rate,
	.set_rate = clk_half_divider_set_rate,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) * Register a clock branch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) * Most clock branches have a form like
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * src1 --|--\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) * |M |--[GATE]-[DIV]-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) * src2 --|--/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) * sometimes without one of those components.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) struct clk *rockchip_clk_register_halfdiv(const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) const char *const *parent_names,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) u8 num_parents, void __iomem *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) int muxdiv_offset, u8 mux_shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) u8 mux_width, u8 mux_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) int div_offset, u8 div_shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) u8 div_width, u8 div_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) int gate_offset, u8 gate_shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) u8 gate_flags, unsigned long flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) spinlock_t *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) struct clk_hw *hw = ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) struct clk_mux *mux = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) struct clk_gate *gate = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) struct clk_divider *div = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) *gate_ops = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) if (num_parents > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) mux = kzalloc(sizeof(*mux), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) if (!mux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) mux->reg = base + muxdiv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) mux->shift = mux_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) mux->mask = BIT(mux_width) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) mux->flags = mux_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) mux->lock = lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) : &clk_mux_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) if (gate_offset >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) gate = kzalloc(sizeof(*gate), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) if (!gate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) goto err_gate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) gate->flags = gate_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) gate->reg = base + gate_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) gate->bit_idx = gate_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) gate->lock = lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) gate_ops = &clk_gate_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) if (div_width > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) div = kzalloc(sizeof(*div), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) if (!div)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) goto err_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) div->flags = div_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) if (div_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) div->reg = base + div_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) div->reg = base + muxdiv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) div->shift = div_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) div->width = div_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) div->lock = lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) div_ops = &clk_half_divider_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) mux ? &mux->hw : NULL, mux_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) div ? &div->hw : NULL, div_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) gate ? &gate->hw : NULL, gate_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) if (IS_ERR(hw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) goto err_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) return hw->clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) err_div:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) kfree(gate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) err_gate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) kfree(mux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) return ERR_CAST(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) }