// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Marvell Technology Group Ltd.
 *
 * Alexandre Belloni <alexandre.belloni@free-electrons.com>
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 */
#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "berlin2-div.h"

/*
 * Clock dividers in Berlin2 SoCs comprise a complex cell used to select
 * the input pll and the divider. The virtual structure as it is used in
 * Marvell BSP code can be seen as:
 *
 *                      +---+
 * pll0 --------------->| 0 |                   +---+
 *           +---+      |(B)|--+--------------->| 0 |      +---+
 * pll1.0 -->| 0 |  +-->| 1 |  |   +--------+   |(E)|----->| 0 |   +---+
 * pll1.1 -->| 1 |  |   +---+  +-->|(C) 1:M |-->| 1 |      |(F)|-->|(G)|->
 * ...    -->|(A)|--+          |   +--------+   +---+  +-->| 1 |   +---+
 * ...    -->|   |             +-->|(D) 1:3 |----------+   +---+
 * pll1.N -->| N |                 +--------+
 *           +---+
 *
 * (A) input pll clock mux controlled by               <PllSelect[1:n]>
 * (B) input pll bypass mux controlled by              <PllSwitch>
 * (C) programmable clock divider controlled by        <Select[1:n]>
 * (D) constant div-by-3 clock divider
 * (E) programmable clock divider bypass controlled by <Switch>
 * (F) constant div-by-3 clock mux controlled by       <D3Switch>
 * (G) clock gate controlled by                        <Enable>
 *
 * For whatever reason, the above control signals come in two flavors:
 * - single register dividers with all bits in one register
 * - shared register dividers with bits spread over multiple registers
 *   (including signals for the same cell spread over consecutive registers)
 *
 * Also, the clock gate and the pll mux are not available on every div
 * cell, so we have to deal with that, too. We reuse the common clock
 * composite driver for it.
 */
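
/*
 * To make the two register flavors concrete: a "single register" divider
 * points every *_offs field of its struct berlin2_div_map at the same
 * register, while a "shared register" divider spreads them over several.
 * The initializer below is only an illustrative sketch; the field names
 * come from berlin2-div.h, but the offsets and shifts are invented and
 * do not describe any real Berlin2 cell:
 *
 *	static const struct berlin2_div_map example_map = {
 *		.gate_offs        = 0x0c, .gate_shift        = 4,
 *		.pll_switch_offs  = 0x0c, .pll_switch_shift  = 5,
 *		.pll_select_offs  = 0x0c, .pll_select_shift  = 6,
 *		.div_switch_offs  = 0x0c, .div_switch_shift  = 9,
 *		.div3_switch_offs = 0x0c, .div3_switch_shift = 10,
 *		.div_select_offs  = 0x0c, .div_select_shift  = 11,
 *	};
 */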

#define PLL_SELECT_MASK	0x7
#define DIV_SELECT_MASK	0x7

struct berlin2_div {
	struct clk_hw hw;
	void __iomem *base;
	struct berlin2_div_map map;
	spinlock_t *lock;
};

#define to_berlin2_div(hw) container_of(hw, struct berlin2_div, hw)

/* divider value for each DIV_SELECT setting; entries 6 and 7 fall back to 1 */
static const u8 clk_div[] = { 1, 2, 4, 6, 8, 12, 1, 1 };

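/*
 * Gate cell (G) from the diagram above: the <Enable> bit is addressed by
 * map->gate_offs and map->gate_shift. The spinlock is optional; when it
 * is NULL the register access is done unlocked.
 */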
static int berlin2_div_is_enabled(struct clk_hw *hw)
{
	struct berlin2_div *div = to_berlin2_div(hw);
	struct berlin2_div_map *map = &div->map;
	u32 reg;

	if (div->lock)
		spin_lock(div->lock);

	reg = readl_relaxed(div->base + map->gate_offs);
	reg >>= map->gate_shift;

	if (div->lock)
		spin_unlock(div->lock);

	return (reg & 0x1);
}

static int berlin2_div_enable(struct clk_hw *hw)
{
	struct berlin2_div *div = to_berlin2_div(hw);
	struct berlin2_div_map *map = &div->map;
	u32 reg;

	if (div->lock)
		spin_lock(div->lock);

	reg = readl_relaxed(div->base + map->gate_offs);
	reg |= BIT(map->gate_shift);
	writel_relaxed(reg, div->base + map->gate_offs);

	if (div->lock)
		spin_unlock(div->lock);

	return 0;
}

static void berlin2_div_disable(struct clk_hw *hw)
{
	struct berlin2_div *div = to_berlin2_div(hw);
	struct berlin2_div_map *map = &div->map;
	u32 reg;

	if (div->lock)
		spin_lock(div->lock);

	reg = readl_relaxed(div->base + map->gate_offs);
	reg &= ~BIT(map->gate_shift);
	writel_relaxed(reg, div->base + map->gate_offs);

	if (div->lock)
		spin_unlock(div->lock);
}

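/*
 * Parent muxes (A) and (B) from the diagram above: parent index 0 selects
 * the bypassed pll0 input (<PllSwitch> = 0), while index N > 0 selects
 * input pll1.(N - 1) by setting <PllSwitch> = 1 and programming
 * <PllSelect> with N - 1.
 */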
static int berlin2_div_set_parent(struct clk_hw *hw, u8 index)
{
	struct berlin2_div *div = to_berlin2_div(hw);
	struct berlin2_div_map *map = &div->map;
	u32 reg;

	if (div->lock)
		spin_lock(div->lock);

	/* index == 0 is PLL_SWITCH */
	reg = readl_relaxed(div->base + map->pll_switch_offs);
	if (index == 0)
		reg &= ~BIT(map->pll_switch_shift);
	else
		reg |= BIT(map->pll_switch_shift);
	writel_relaxed(reg, div->base + map->pll_switch_offs);

	/* index > 0 is PLL_SELECT */
	if (index > 0) {
		reg = readl_relaxed(div->base + map->pll_select_offs);
		reg &= ~(PLL_SELECT_MASK << map->pll_select_shift);
		reg |= (index - 1) << map->pll_select_shift;
		writel_relaxed(reg, div->base + map->pll_select_offs);
	}

	if (div->lock)
		spin_unlock(div->lock);

	return 0;
}

static u8 berlin2_div_get_parent(struct clk_hw *hw)
{
	struct berlin2_div *div = to_berlin2_div(hw);
	struct berlin2_div_map *map = &div->map;
	u32 reg;
	u8 index = 0;

	if (div->lock)
		spin_lock(div->lock);

	/* PLL_SWITCH == 0 is index 0 */
	reg = readl_relaxed(div->base + map->pll_switch_offs);
	reg &= BIT(map->pll_switch_shift);
	if (reg) {
		reg = readl_relaxed(div->base + map->pll_select_offs);
		reg >>= map->pll_select_shift;
		reg &= PLL_SELECT_MASK;
		index = 1 + reg;
	}

	if (div->lock)
		spin_unlock(div->lock);

	return index;
}

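/*
 * Rate readback follows the divider priority from the diagram: a set
 * <D3Switch> forces the fixed divide-by-3 path (D)/(F); otherwise a
 * cleared <Switch> bypasses the programmable divider via (E); otherwise
 * the divider comes from clk_div[<Select>], e.g. DIV_SELECT == 3 gives
 * parent_rate / 6.
 */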
static unsigned long berlin2_div_recalc_rate(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct berlin2_div *div = to_berlin2_div(hw);
	struct berlin2_div_map *map = &div->map;
	u32 divsw, div3sw, divider = 1;

	if (div->lock)
		spin_lock(div->lock);

	divsw = readl_relaxed(div->base + map->div_switch_offs) &
		(1 << map->div_switch_shift);
	div3sw = readl_relaxed(div->base + map->div3_switch_offs) &
		(1 << map->div3_switch_shift);

	/* constant divide-by-3 (dominant) */
	if (div3sw != 0) {
		divider = 3;
	/* divider can be bypassed with DIV_SWITCH == 0 */
	} else if (divsw == 0) {
		divider = 1;
	/* clock divider determined by DIV_SELECT */
	} else {
		u32 reg;

		reg = readl_relaxed(div->base + map->div_select_offs);
		reg >>= map->div_select_shift;
		reg &= DIV_SELECT_MASK;
		divider = clk_div[reg];
	}

	if (div->lock)
		spin_unlock(div->lock);

	return parent_rate / divider;
}

static const struct clk_ops berlin2_div_rate_ops = {
	.recalc_rate	= berlin2_div_recalc_rate,
};

static const struct clk_ops berlin2_div_gate_ops = {
	.is_enabled	= berlin2_div_is_enabled,
	.enable		= berlin2_div_enable,
	.disable	= berlin2_div_disable,
};

static const struct clk_ops berlin2_div_mux_ops = {
	.set_parent	= berlin2_div_set_parent,
	.get_parent	= berlin2_div_get_parent,
};

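/*
 * Usage sketch: a SoC clock driver would fill a berlin2_div_map per cell
 * and register it here. The clock name, register offset and parent list
 * below are hypothetical and only illustrate the parameters; they are
 * not taken from a real Berlin2 clock driver:
 *
 *	static const char *parents[] = { "pll0", "pll1.0", "pll1.1" };
 *	static DEFINE_SPINLOCK(lock);
 *
 *	hw = berlin2_div_register(&example_map, base + 0x680, "sdio",
 *				  BERLIN2_DIV_HAS_GATE | BERLIN2_DIV_HAS_MUX,
 *				  parents, ARRAY_SIZE(parents), 0, &lock);
 */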
struct clk_hw * __init
berlin2_div_register(const struct berlin2_div_map *map,
		     void __iomem *base, const char *name, u8 div_flags,
		     const char **parent_names, int num_parents,
		     unsigned long flags, spinlock_t *lock)
{
	const struct clk_ops *mux_ops = &berlin2_div_mux_ops;
	const struct clk_ops *rate_ops = &berlin2_div_rate_ops;
	const struct clk_ops *gate_ops = &berlin2_div_gate_ops;
	struct berlin2_div *div;
	struct clk_hw *hw;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	/* copy div_map to allow __initconst */
	memcpy(&div->map, map, sizeof(*map));
	div->base = base;
	div->lock = lock;

	if ((div_flags & BERLIN2_DIV_HAS_GATE) == 0)
		gate_ops = NULL;
	if ((div_flags & BERLIN2_DIV_HAS_MUX) == 0)
		mux_ops = NULL;

	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
				       &div->hw, mux_ops, &div->hw, rate_ops,
				       &div->hw, gate_ops, flags);
	/* don't leak the private data if composite registration failed */
	if (IS_ERR(hw))
		kfree(div);

	return hw;
}