// SPDX-License-Identifier: GPL-2.0
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "clk.h"

#define CCM_CCDR			0x4
#define CCDR_MMDC_CH0_MASK		BIT(17)
#define CCDR_MMDC_CH1_MASK		BIT(16)

DEFINE_SPINLOCK(imx_ccm_lock);
EXPORT_SYMBOL_GPL(imx_ccm_lock);

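/*
 * Undo clk_register() for an array of clocks, typically on an error
 * path after some of the clocks of a provider were already set up.
 */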
void imx_unregister_clocks(struct clk *clks[], unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		clk_unregister(clks[i]);
}

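/* Same as imx_unregister_clocks(), but for the clk_hw based registration API. */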
void imx_unregister_hw_clocks(struct clk_hw *hws[], unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		clk_hw_unregister(hws[i]);
}
EXPORT_SYMBOL_GPL(imx_unregister_hw_clocks);

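/*
 * Set the CCDR mask bit for the selected MMDC channel so the CCM stops
 * waiting for the corresponding MMDC handshake, e.g. when that channel
 * is unused on a given board.
 */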
void imx_mmdc_mask_handshake(void __iomem *ccm_base,
			     unsigned int chn)
{
	unsigned int reg;

	reg = readl_relaxed(ccm_base + CCM_CCDR);
	reg |= chn == 0 ? CCDR_MMDC_CH0_MASK : CCDR_MMDC_CH1_MASK;
	writel_relaxed(reg, ccm_base + CCM_CCDR);
}

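/*
 * Walk an array of clocks and report any entry whose registration
 * failed; the array is left untouched, so valid clocks stay usable.
 */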
void imx_check_clocks(struct clk *clks[], unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		if (IS_ERR(clks[i]))
			pr_err("i.MX clk %u: register failed with %ld\n",
			       i, PTR_ERR(clks[i]));
}

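/* clk_hw based counterpart of imx_check_clocks(). */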
void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		if (IS_ERR(clks[i]))
			pr_err("i.MX clk %u: register failed with %ld\n",
			       i, PTR_ERR(clks[i]));
}
EXPORT_SYMBOL_GPL(imx_check_clk_hws);

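/*
 * Look up a fixed clock by node name under the top-level /clocks node
 * of the device tree and return the clock provided there, or an
 * ERR_PTR() if no such node exists.
 */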
static struct clk *imx_obtain_fixed_clock_from_dt(const char *name)
{
	struct of_phandle_args phandle;
	struct clk *clk = ERR_PTR(-ENODEV);
	char *path;

	path = kasprintf(GFP_KERNEL, "/clocks/%s", name);
	if (!path)
		return ERR_PTR(-ENOMEM);

	phandle.np = of_find_node_by_path(path);
	kfree(path);

	if (phandle.np) {
		clk = of_clk_get_from_provider(&phandle);
		of_node_put(phandle.np);
	}
	return clk;
}

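/*
 * Return the board-provided fixed clock named @name if the device tree
 * has one under /clocks, otherwise register a fixed-rate clock at
 * @rate as a fallback. A SoC clock driver would typically call e.g.
 * imx_obtain_fixed_clock("osc", 24000000) early in its init path
 * (name and rate here are illustrative).
 */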
struct clk *imx_obtain_fixed_clock(
			const char *name, unsigned long rate)
{
	struct clk *clk;

	clk = imx_obtain_fixed_clock_from_dt(name);
	if (IS_ERR(clk))
		clk = imx_clk_fixed(name, rate);
	return clk;
}

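/* Same lookup as imx_obtain_fixed_clock(), but returns the struct clk_hw. */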
struct clk_hw *imx_obtain_fixed_clock_hw(
			const char *name, unsigned long rate)
{
	struct clk *clk;

	clk = imx_obtain_fixed_clock_from_dt(name);
	if (IS_ERR(clk))
		clk = imx_clk_fixed(name, rate);
	return __clk_get_hw(clk);
}

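/*
 * Resolve a clock from the "clocks"/"clock-names" properties of @np and
 * hand back its clk_hw, or ERR_PTR(-ENOENT) if it is missing.
 */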
struct clk_hw *imx_obtain_fixed_clk_hw(struct device_node *np,
				       const char *name)
{
	struct clk *clk;

	clk = of_clk_get_by_name(np, name);
	if (IS_ERR(clk))
		return ERR_PTR(-ENOENT);

	return __clk_get_hw(clk);
}
EXPORT_SYMBOL_GPL(imx_obtain_fixed_clk_hw);

/*
 * This fixes up the CCM_CSCMR1 register write value.
 * The write/read/divider values of the aclk_podf field
 * of that register are related as described by the
 * following table:
 *
 * write value       read value        divider
 * 3b'000            3b'110            7
 * 3b'001            3b'111            8
 * 3b'010            3b'100            5
 * 3b'011            3b'101            6
 * 3b'100            3b'010            3
 * 3b'101            3b'011            4
 * 3b'110            3b'000            1
 * 3b'111            3b'001            2 (default)
 *
 * That's why we do the xor operation below.
 */
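/*
 * Worked example: the read value follows the usual (divider - 1)
 * encoding, e.g. 3b'011 reads back for a divider of 4, but to get that
 * divider the value 3b'101 has to be written. The two differ by an XOR
 * with 3b'110, and CSCMR1_FIXUP below is that pattern shifted to the
 * aclk_podf field position, which is why a plain XOR is enough to
 * translate between them.
 */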
#define CSCMR1_FIXUP	0x00600000

void imx_cscmr1_fixup(u32 *val)
{
	*val ^= CSCMR1_FIXUP;
}

#ifndef MODULE

static bool imx_keep_uart_clocks;
static int imx_enabled_uart_clocks;
static struct clk **imx_uart_clocks;

static int __init imx_keep_uart_clocks_param(char *str)
{
	imx_keep_uart_clocks = true;

	return 0;
}
__setup_param("earlycon", imx_keep_uart_earlycon,
	      imx_keep_uart_clocks_param, 0);
__setup_param("earlyprintk", imx_keep_uart_earlyprintk,
	      imx_keep_uart_clocks_param, 0);

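/*
 * When "earlycon" or "earlyprintk" is on the kernel command line, grab
 * and enable the clocks of the stdout-path UART so early console output
 * keeps working; they are released again from the late initcall below.
 */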
void imx_register_uart_clocks(unsigned int clk_count)
{
	imx_enabled_uart_clocks = 0;

/* i.MX boards use device trees now. For build tests without CONFIG_OF, do nothing */
#ifdef CONFIG_OF
	if (imx_keep_uart_clocks) {
		int i;

		imx_uart_clocks = kcalloc(clk_count, sizeof(struct clk *), GFP_KERNEL);
		if (!imx_uart_clocks)
			return;

		if (!of_stdout)
			return;

		for (i = 0; i < clk_count; i++) {
			imx_uart_clocks[imx_enabled_uart_clocks] = of_clk_get(of_stdout, i);

			/* Stop if there are no more of_stdout references */
			if (IS_ERR(imx_uart_clocks[imx_enabled_uart_clocks]))
				return;

			/* Only enable the clock if it's not NULL */
			if (imx_uart_clocks[imx_enabled_uart_clocks])
				clk_prepare_enable(imx_uart_clocks[imx_enabled_uart_clocks++]);
		}
	}
#endif
}

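/*
 * Late initcall counterpart of imx_register_uart_clocks(): by this
 * point a proper console driver holds its own clock references, so
 * drop the extra ones taken during early boot.
 */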
static int __init imx_clk_disable_uart(void)
{
	if (imx_keep_uart_clocks && imx_enabled_uart_clocks) {
		int i;

		for (i = 0; i < imx_enabled_uart_clocks; i++) {
			clk_disable_unprepare(imx_uart_clocks[i]);
			clk_put(imx_uart_clocks[i]);
		}
		kfree(imx_uart_clocks);
	}

	return 0;
}
late_initcall_sync(imx_clk_disable_uart);
#endif

MODULE_LICENSE("GPL v2");