// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_gate.h"
#include "ccu_mult.h"

struct _ccu_mult {
	unsigned long mult, min, max;
};

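/*
 * Compute the integer multiplier for the requested rate (rounded down)
 * and clamp it to the [min, max] range the multiplier field can encode.
 */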
static void ccu_mult_find_best(unsigned long parent, unsigned long rate,
			       struct _ccu_mult *mult)
{
	int _mult;

	_mult = rate / parent;
	if (_mult < mult->min)
		_mult = mult->min;

	if (_mult > mult->max)
		_mult = mult->max;

	mult->mult = _mult;
}

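/*
 * Round-rate callback handed to the mux helper: report the rate the
 * integer multiplier actually achieves for the given parent rate.
 */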
static unsigned long ccu_mult_round_rate(struct ccu_mux_internal *mux,
					 struct clk_hw *parent,
					 unsigned long *parent_rate,
					 unsigned long rate,
					 void *data)
{
	struct ccu_mult *cm = data;
	struct _ccu_mult _cm;

	_cm.min = cm->mult.min;

	if (cm->mult.max)
		_cm.max = cm->mult.max;
	else
		_cm.max = (1 << cm->mult.width) + cm->mult.offset - 1;

	ccu_mult_find_best(*parent_rate, rate, &_cm);

	return *parent_rate * _cm.mult;
}

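/* Gating is delegated to the common gate helpers, keyed on the enable mask. */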
static void ccu_mult_disable(struct clk_hw *hw)
{
	struct ccu_mult *cm = hw_to_ccu_mult(hw);

	return ccu_gate_helper_disable(&cm->common, cm->enable);
}

static int ccu_mult_enable(struct clk_hw *hw)
{
	struct ccu_mult *cm = hw_to_ccu_mult(hw);

	return ccu_gate_helper_enable(&cm->common, cm->enable);
}

static int ccu_mult_is_enabled(struct clk_hw *hw)
{
	struct ccu_mult *cm = hw_to_ccu_mult(hw);

	return ccu_gate_helper_is_enabled(&cm->common, cm->enable);
}

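/*
 * Read the current rate back from the hardware: use the fractional helper
 * when fractional mode is enabled, otherwise extract the multiplier field
 * and apply the mux pre-divider to the parent rate.
 */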
static unsigned long ccu_mult_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	struct ccu_mult *cm = hw_to_ccu_mult(hw);
	unsigned long val;
	u32 reg;

	if (ccu_frac_helper_is_enabled(&cm->common, &cm->frac))
		return ccu_frac_helper_read_rate(&cm->common, &cm->frac);

	reg = readl(cm->common.base + cm->common.reg);
	val = reg >> cm->mult.shift;
	val &= (1 << cm->mult.width) - 1;

	parent_rate = ccu_mux_helper_apply_prediv(&cm->common, &cm->mux, -1,
						  parent_rate);

	return parent_rate * (val + cm->mult.offset);
}

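/* Let the mux helper pick the best parent, using our round-rate hook. */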
static int ccu_mult_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct ccu_mult *cm = hw_to_ccu_mult(hw);

	return ccu_mux_helper_determine_rate(&cm->common, &cm->mux,
					     req, ccu_mult_round_rate, cm);
}

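/*
 * Program a new rate: prefer the fractional helper when it can provide the
 * exact rate, otherwise write the best integer multiplier under the CCU
 * lock and wait for the hardware lock indication.
 */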
static int ccu_mult_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct ccu_mult *cm = hw_to_ccu_mult(hw);
	struct _ccu_mult _cm;
	unsigned long flags;
	u32 reg;

	if (ccu_frac_helper_has_rate(&cm->common, &cm->frac, rate)) {
		ccu_frac_helper_enable(&cm->common, &cm->frac);

		return ccu_frac_helper_set_rate(&cm->common, &cm->frac,
						rate, cm->lock);
	} else {
		ccu_frac_helper_disable(&cm->common, &cm->frac);
	}

	parent_rate = ccu_mux_helper_apply_prediv(&cm->common, &cm->mux, -1,
						  parent_rate);

	_cm.min = cm->mult.min;

	if (cm->mult.max)
		_cm.max = cm->mult.max;
	else
		_cm.max = (1 << cm->mult.width) + cm->mult.offset - 1;

	ccu_mult_find_best(parent_rate, rate, &_cm);

	spin_lock_irqsave(cm->common.lock, flags);

	reg = readl(cm->common.base + cm->common.reg);
	reg &= ~GENMASK(cm->mult.width + cm->mult.shift - 1, cm->mult.shift);
	reg |= ((_cm.mult - cm->mult.offset) << cm->mult.shift);

	writel(reg, cm->common.base + cm->common.reg);

	spin_unlock_irqrestore(cm->common.lock, flags);

	ccu_helper_wait_for_lock(&cm->common, cm->lock);

	return 0;
}

static u8 ccu_mult_get_parent(struct clk_hw *hw)
{
	struct ccu_mult *cm = hw_to_ccu_mult(hw);

	return ccu_mux_helper_get_parent(&cm->common, &cm->mux);
}

static int ccu_mult_set_parent(struct clk_hw *hw, u8 index)
{
	struct ccu_mult *cm = hw_to_ccu_mult(hw);

	return ccu_mux_helper_set_parent(&cm->common, &cm->mux, index);
}

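/* clk_ops for CCU clocks built around a gate, a mux and a multiplier. */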
const struct clk_ops ccu_mult_ops = {
	.disable	= ccu_mult_disable,
	.enable		= ccu_mult_enable,
	.is_enabled	= ccu_mult_is_enabled,

	.get_parent	= ccu_mult_get_parent,
	.set_parent	= ccu_mult_set_parent,

	.determine_rate	= ccu_mult_determine_rate,
	.recalc_rate	= ccu_mult_recalc_rate,
	.set_rate	= ccu_mult_set_rate,
};