// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 *
 * Adjustable divider clock implementation
 */

#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/log2.h>

/*
 * DOC: basic adjustable divider clock that cannot gate
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is adjustable. clk->rate = ceiling(parent->rate / divisor)
 * parent - fixed parent. No clk_set_parent support
 */
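
/*
 * Illustrative sketch (hypothetical, not part of this driver): a sample
 * clk_div_table showing how the { .val, .div } pairs feed the ceiling
 * formula above for an assumed 100 MHz parent. The table name and the
 * specific entries are made up; only the zero-div sentinel that terminates
 * the array is required by the clk_div_table contract.
 *
 *	static const struct clk_div_table example_div_table[] = {
 *		{ .val = 0, .div = 1 },   100000000 / 1 -> 100000000 Hz
 *		{ .val = 1, .div = 3 },   DIV_ROUND_UP(100000000, 3) -> 33333334 Hz
 *		{ .val = 2, .div = 8 },   100000000 / 8 -> 12500000 Hz
 *		{ }                       sentinel: .div == 0 ends the table
 *	};
 */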

static inline u32 clk_div_readl(struct clk_divider *divider)
{
	if (divider->flags & CLK_DIVIDER_BIG_ENDIAN)
		return ioread32be(divider->reg);

	return readl(divider->reg);
}

static inline void clk_div_writel(struct clk_divider *divider, u32 val)
{
	if (divider->flags & CLK_DIVIDER_BIG_ENDIAN)
		iowrite32be(val, divider->reg);
	else
		writel(val, divider->reg);
}

static unsigned int _get_table_maxdiv(const struct clk_div_table *table,
				      u8 width)
{
	unsigned int maxdiv = 0, mask = clk_div_mask(width);
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div > maxdiv && clkt->val <= mask)
			maxdiv = clkt->div;
	return maxdiv;
}

static unsigned int _get_table_mindiv(const struct clk_div_table *table)
{
	unsigned int mindiv = UINT_MAX;
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div < mindiv)
			mindiv = clkt->div;
	return mindiv;
}

static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
				unsigned long flags)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return clk_div_mask(width);
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << clk_div_mask(width);
	if (table)
		return _get_table_maxdiv(table, width);
	return clk_div_mask(width) + 1;
}

static unsigned int _get_table_div(const struct clk_div_table *table,
				   unsigned int val)
{
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->val == val)
			return clkt->div;
	return 0;
}

static unsigned int _get_div(const struct clk_div_table *table,
			     unsigned int val, unsigned long flags, u8 width)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return val;
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << val;
	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
		return val ? val : clk_div_mask(width) + 1;
	if (table)
		return _get_table_div(table, val);
	return val + 1;
}
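
/*
 * Illustration of the decode above, using assumed numbers: with width = 4
 * the field mask is 0xf, so a raw register value of 5 maps to
 *
 *	no flags:			div = 5 + 1 = 6
 *	CLK_DIVIDER_ONE_BASED:		div = 5
 *	CLK_DIVIDER_POWER_OF_TWO:	div = 1 << 5 = 32
 *	CLK_DIVIDER_MAX_AT_ZERO:	div = 5 (a value of 0 would decode as
 *					clk_div_mask(4) + 1 = 16)
 *
 * _get_val() below implements the inverse mapping from divisor back to
 * register value.
 */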

static unsigned int _get_table_val(const struct clk_div_table *table,
				   unsigned int div)
{
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div == div)
			return clkt->val;
	return 0;
}

static unsigned int _get_val(const struct clk_div_table *table,
			     unsigned int div, unsigned long flags, u8 width)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return div;
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return __ffs(div);
	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
		return (div == clk_div_mask(width) + 1) ? 0 : div;
	if (table)
		return _get_table_val(table, div);
	return div - 1;
}

unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
				  unsigned int val,
				  const struct clk_div_table *table,
				  unsigned long flags, unsigned long width)
{
	unsigned int div;

	div = _get_div(table, val, flags, width);
	if (!div) {
		WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO),
		     "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
		     clk_hw_get_name(hw));
		return parent_rate;
	}

	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}
EXPORT_SYMBOL_GPL(divider_recalc_rate);

static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int val;

	val = clk_div_readl(divider) >> divider->shift;
	val &= clk_div_mask(divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static bool _is_valid_table_div(const struct clk_div_table *table,
				unsigned int div)
{
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div == div)
			return true;
	return false;
}

static bool _is_valid_div(const struct clk_div_table *table, unsigned int div,
			  unsigned long flags)
{
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return is_power_of_2(div);
	if (table)
		return _is_valid_table_div(table, div);
	return true;
}

static int _round_up_table(const struct clk_div_table *table, int div)
{
	const struct clk_div_table *clkt;
	int up = INT_MAX;

	for (clkt = table; clkt->div; clkt++) {
		if (clkt->div == div)
			return clkt->div;
		else if (clkt->div < div)
			continue;

		if ((clkt->div - div) < (up - div))
			up = clkt->div;
	}

	return up;
}

static int _round_down_table(const struct clk_div_table *table, int div)
{
	const struct clk_div_table *clkt;
	int down = _get_table_mindiv(table);

	for (clkt = table; clkt->div; clkt++) {
		if (clkt->div == div)
			return clkt->div;
		else if (clkt->div > div)
			continue;

		if ((div - clkt->div) < (div - down))
			down = clkt->div;
	}

	return down;
}

static int _div_round_up(const struct clk_div_table *table,
			 unsigned long parent_rate, unsigned long rate,
			 unsigned long flags)
{
	int div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);

	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		div = __roundup_pow_of_two(div);
	if (table)
		div = _round_up_table(table, div);

	return div;
}

static int _div_round_closest(const struct clk_div_table *table,
			      unsigned long parent_rate, unsigned long rate,
			      unsigned long flags)
{
	int up, down;
	unsigned long up_rate, down_rate;

	up = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
	down = parent_rate / rate;

	if (flags & CLK_DIVIDER_POWER_OF_TWO) {
		up = __roundup_pow_of_two(up);
		down = __rounddown_pow_of_two(down);
	} else if (table) {
		up = _round_up_table(table, up);
		down = _round_down_table(table, down);
	}

	up_rate = DIV_ROUND_UP_ULL((u64)parent_rate, up);
	down_rate = DIV_ROUND_UP_ULL((u64)parent_rate, down);

	return (rate - up_rate) <= (down_rate - rate) ? up : down;
}

static int _div_round(const struct clk_div_table *table,
		      unsigned long parent_rate, unsigned long rate,
		      unsigned long flags)
{
	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
		return _div_round_closest(table, parent_rate, rate, flags);

	return _div_round_up(table, parent_rate, rate, flags);
}

static bool _is_best_div(unsigned long rate, unsigned long now,
			 unsigned long best, unsigned long flags)
{
	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
		return abs(rate - now) < abs(rate - best);

	return now <= rate && now > best;
}

static int _next_div(const struct clk_div_table *table, int div,
		     unsigned long flags)
{
	div++;

	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return __roundup_pow_of_two(div);
	if (table)
		return _round_up_table(table, div);

	return div;
}

static int clk_divider_bestdiv(struct clk_hw *hw, struct clk_hw *parent,
			       unsigned long rate,
			       unsigned long *best_parent_rate,
			       const struct clk_div_table *table, u8 width,
			       unsigned long flags)
{
	int i, bestdiv = 0;
	unsigned long parent_rate, best = 0, now, maxdiv;
	unsigned long parent_rate_saved = *best_parent_rate;

	if (!rate)
		rate = 1;

	maxdiv = _get_maxdiv(table, width, flags);

	if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
		parent_rate = *best_parent_rate;
		bestdiv = _div_round(table, parent_rate, rate, flags);
		bestdiv = bestdiv == 0 ? 1 : bestdiv;
		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
		return bestdiv;
	}

	/*
	 * The maximum divider we can use without overflowing
	 * unsigned long in rate * i below
	 */
	maxdiv = min(ULONG_MAX / rate, maxdiv);

	for (i = _next_div(table, 0, flags); i <= maxdiv;
	     i = _next_div(table, i, flags)) {
		if (rate * i == parent_rate_saved) {
			/*
			 * This is the ideal case: the requested rate can be
			 * divided down exactly from the current parent rate
			 * without changing it, so return this divider
			 * immediately.
			 */
			*best_parent_rate = parent_rate_saved;
			return i;
		}
		parent_rate = clk_hw_round_rate(parent, rate * i);
		now = DIV_ROUND_UP_ULL((u64)parent_rate, i);
		if (_is_best_div(rate, now, best, flags)) {
			bestdiv = i;
			best = now;
			*best_parent_rate = parent_rate;
		}
	}

	if (!bestdiv) {
		bestdiv = _get_maxdiv(table, width, flags);
		*best_parent_rate = clk_hw_round_rate(parent, 1);
	}

	return bestdiv;
}
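
/*
 * Worked example for the CLK_SET_RATE_PARENT path above (all numbers are
 * hypothetical): asking for 33333333 Hz from a divider whose parent can be
 * rounded to 66666666 Hz makes the i = 2 iteration yield
 * DIV_ROUND_UP(66666666, 2) = 33333333 Hz, an exact match, so bestdiv
 * becomes 2 and *best_parent_rate is updated to 66666666 Hz.  Candidates
 * that overshoot the requested rate are only accepted when
 * CLK_DIVIDER_ROUND_CLOSEST is set, per _is_best_div().
 */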

long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
			       unsigned long rate, unsigned long *prate,
			       const struct clk_div_table *table,
			       u8 width, unsigned long flags)
{
	int div;

	div = clk_divider_bestdiv(hw, parent, rate, prate, table, width, flags);

	return DIV_ROUND_UP_ULL((u64)*prate, div);
}
EXPORT_SYMBOL_GPL(divider_round_rate_parent);

long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
				  unsigned long rate, unsigned long *prate,
				  const struct clk_div_table *table, u8 width,
				  unsigned long flags, unsigned int val)
{
	int div;

	div = _get_div(table, val, flags, width);

	/* Even a read-only clock can propagate a rate change */
	if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
		if (!parent)
			return -EINVAL;

		*prate = clk_hw_round_rate(parent, rate * div);
	}

	return DIV_ROUND_UP_ULL((u64)*prate, div);
}
EXPORT_SYMBOL_GPL(divider_ro_round_rate_parent);

static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *prate)
{
	struct clk_divider *divider = to_clk_divider(hw);

	/* if read only, just return current value */
	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
		u32 val;

		val = clk_div_readl(divider) >> divider->shift;
		val &= clk_div_mask(divider->width);

		return divider_ro_round_rate(hw, rate, prate, divider->table,
					     divider->width, divider->flags,
					     val);
	}

	return divider_round_rate(hw, rate, prate, divider->table,
				  divider->width, divider->flags);
}

int divider_get_val(unsigned long rate, unsigned long parent_rate,
		    const struct clk_div_table *table, u8 width,
		    unsigned long flags)
{
	unsigned int div, value;

	div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);

	if (!_is_valid_div(table, div, flags))
		return -EINVAL;

	value = _get_val(table, div, flags, width);

	return min_t(unsigned int, value, clk_div_mask(width));
}
EXPORT_SYMBOL_GPL(divider_get_val);
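
/*
 * Quick numeric sketch of the encode path above (assumed values): a target
 * rate of 25 MHz from a 100 MHz parent gives div = DIV_ROUND_UP(100, 25) = 4;
 * with no divider flags and no table this is stored as val = div - 1 = 3,
 * clamped to the field mask by the min_t() above.
 */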

static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	int value;
	unsigned long flags = 0;
	u32 val;

	value = divider_get_val(rate, parent_rate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);
	else
		__acquire(divider->lock);

	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
		val = clk_div_mask(divider->width) << (divider->shift + 16);
	} else {
		val = clk_div_readl(divider);
		val &= ~(clk_div_mask(divider->width) << divider->shift);
	}
	val |= (u32)value << divider->shift;
	clk_div_writel(divider, val);

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);
	else
		__release(divider->lock);

	return 0;
}

const struct clk_ops clk_divider_ops = {
	.recalc_rate = clk_divider_recalc_rate,
	.round_rate = clk_divider_round_rate,
	.set_rate = clk_divider_set_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ops);

const struct clk_ops clk_divider_ro_ops = {
	.recalc_rate = clk_divider_recalc_rate,
	.round_rate = clk_divider_round_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ro_ops);

struct clk_hw *__clk_hw_register_divider(struct device *dev,
		struct device_node *np, const char *name,
		const char *parent_name, const struct clk_hw *parent_hw,
		const struct clk_parent_data *parent_data, unsigned long flags,
		void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
		const struct clk_div_table *table, spinlock_t *lock)
{
	struct clk_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init = {};
	int ret;

	if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
		if (width + shift > 16) {
			pr_warn("divider value exceeds LOWORD field\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* allocate the divider */
	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
		init.ops = &clk_divider_ro_ops;
	else
		init.ops = &clk_divider_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.parent_hws = parent_hw ? &parent_hw : NULL;
	init.parent_data = parent_data;
	if (parent_name || parent_hw || parent_data)
		init.num_parents = 1;
	else
		init.num_parents = 0;

	/* struct clk_divider assignments */
	div->reg = reg;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;
	div->table = table;

	/* register the clock */
	hw = &div->hw;
	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(div);
		hw = ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__clk_hw_register_divider);

/**
 * clk_register_divider_table - register a table based divider clock with
 * the clock framework
 * @dev: device registering this clock
 * @name: name of this clock
 * @parent_name: name of clock's parent
 * @flags: framework-specific flags
 * @reg: register address to adjust divider
 * @shift: number of bits to shift the bitfield
 * @width: width of the bitfield
 * @clk_divider_flags: divider-specific flags for this clock
 * @table: array of divider/value pairs ending with a div set to 0
 * @lock: shared register lock for this clock
 */
struct clk *clk_register_divider_table(struct device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		void __iomem *reg, u8 shift, u8 width,
		u8 clk_divider_flags, const struct clk_div_table *table,
		spinlock_t *lock)
{
	struct clk_hw *hw;

	hw = __clk_hw_register_divider(dev, NULL, name, parent_name, NULL,
			NULL, flags, reg, shift, width, clk_divider_flags,
			table, lock);
	if (IS_ERR(hw))
		return ERR_CAST(hw);
	return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_divider_table);
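
/*
 * Usage sketch (hypothetical driver code, not taken from an in-tree user):
 * registering a 3-bit table-based divider sitting at bits [10:8] of a
 * memory-mapped register, protected by a driver-owned spinlock.  The names
 * my_base, MY_DIV_REG, example_div_table and my_div_lock are placeholders.
 *
 *	static DEFINE_SPINLOCK(my_div_lock);
 *
 *	clk = clk_register_divider_table(dev, "my_div", "my_parent", 0,
 *					 my_base + MY_DIV_REG, 8, 3, 0,
 *					 example_div_table, &my_div_lock);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 * New users would more likely go through the clk_hw based registration built
 * on __clk_hw_register_divider() above, which avoids handing struct clk
 * pointers around.
 */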

void clk_unregister_divider(struct clk *clk)
{
	struct clk_divider *div;
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	div = to_clk_divider(hw);

	clk_unregister(clk);
	kfree(div);
}
EXPORT_SYMBOL_GPL(clk_unregister_divider);

/**
 * clk_hw_unregister_divider - unregister a clk divider
 * @hw: hardware-specific clock data to unregister
 */
void clk_hw_unregister_divider(struct clk_hw *hw)
{
	struct clk_divider *div;

	div = to_clk_divider(hw);

	clk_hw_unregister(hw);
	kfree(div);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_divider);