// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP3/4 - specific DPLL control functions
 *
 * Copyright (C) 2009-2010 Texas Instruments, Inc.
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Written by Paul Walmsley
 * Testing and integration fixes by Jouni Högander
 *
 * 36xx support added by Vishwanath BS, Richard Woodruff, and Nishanth
 * Menon
 *
 * Parts of this code are based on code written by
 * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/clkdev.h>
#include <linux/clk/ti.h>

#include "clock.h"

/* CM_AUTOIDLE_PLL*.AUTO_* bit values */
#define DPLL_AUTOIDLE_DISABLE           0x0
#define DPLL_AUTOIDLE_LOW_POWER_STOP    0x1

#define MAX_DPLL_WAIT_TRIES             1000000

#define OMAP3XXX_EN_DPLL_LOCKED         0x7

/* Forward declarations */
static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk);
static void omap3_dpll_deny_idle(struct clk_hw_omap *clk);
static void omap3_dpll_allow_idle(struct clk_hw_omap *clk);

/* Private functions */

/* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */
static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits)
{
        const struct dpll_data *dd;
        u32 v;

        dd = clk->dpll_data;

        v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
        v &= ~dd->enable_mask;
        v |= clken_bits << __ffs(dd->enable_mask);
        ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
}

/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
{
        const struct dpll_data *dd;
        int i = 0;
        int ret = -EINVAL;
        const char *clk_name;

        dd = clk->dpll_data;
        clk_name = clk_hw_get_name(&clk->hw);

        state <<= __ffs(dd->idlest_mask);

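        /* Poll the idle status register until the DPLL reaches the requested state or we time out */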
        while (((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask)
                != state) && i < MAX_DPLL_WAIT_TRIES) {
                i++;
                udelay(1);
        }

        if (i == MAX_DPLL_WAIT_TRIES) {
                pr_err("clock: %s failed transition to '%s'\n",
                       clk_name, (state) ? "locked" : "bypassed");
        } else {
                pr_debug("clock: %s transition to '%s' in %d loops\n",
                         clk_name, (state) ? "locked" : "bypassed", i);

                ret = 0;
        }

        return ret;
}

/* From 3430 TRM ES2 4.7.6.2 */
static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
{
        unsigned long fint;
        u16 f = 0;

        fint = clk_hw_get_rate(clk->dpll_data->clk_ref) / n;

        pr_debug("clock: fint is %lu\n", fint);

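        /* Map the DPLL internal reference frequency (fint) to a FREQSEL code */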
        if (fint >= 750000 && fint <= 1000000)
                f = 0x3;
        else if (fint > 1000000 && fint <= 1250000)
                f = 0x4;
        else if (fint > 1250000 && fint <= 1500000)
                f = 0x5;
        else if (fint > 1500000 && fint <= 1750000)
                f = 0x6;
        else if (fint > 1750000 && fint <= 2100000)
                f = 0x7;
        else if (fint > 7500000 && fint <= 10000000)
                f = 0xB;
        else if (fint > 10000000 && fint <= 12500000)
                f = 0xC;
        else if (fint > 12500000 && fint <= 15000000)
                f = 0xD;
        else if (fint > 15000000 && fint <= 17500000)
                f = 0xE;
        else if (fint > 17500000 && fint <= 21000000)
                f = 0xF;
        else
                pr_debug("clock: unknown freqsel setting for %d\n", n);

        return f;
}

/*
 * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to lock. Waits for the DPLL to report
 * readiness before returning. Will save and restore the DPLL's
 * autoidle state across the enable, per the CDP code. If the DPLL
 * locked successfully, return 0; if the DPLL did not lock in the time
 * allotted, or DPLL3 was passed in, return -EINVAL.
 */
static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
{
        const struct dpll_data *dd;
        u8 ai;
        u8 state = 1;
        int r = 0;

        pr_debug("clock: locking DPLL %s\n", clk_hw_get_name(&clk->hw));

        dd = clk->dpll_data;
        state <<= __ffs(dd->idlest_mask);

        /* Check if already locked */
        if ((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask) ==
            state)
                goto done;

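        /* Temporarily deny autoidle (restored below) while forcing the DPLL to lock */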
        ai = omap3_dpll_autoidle_read(clk);

        if (ai)
                omap3_dpll_deny_idle(clk);

        _omap3_dpll_write_clken(clk, DPLL_LOCKED);

        r = _omap3_wait_dpll_status(clk, 1);

        if (ai)
                omap3_dpll_allow_idle(clk);

done:
        return r;
}

/*
 * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power bypass mode. In
 * bypass mode, the DPLL's rate is set equal to its parent clock's
 * rate. Waits for the DPLL to report readiness before returning.
 * Will save and restore the DPLL's autoidle state across the enable,
 * per the CDP code. If the DPLL entered bypass mode successfully,
 * return 0; if the DPLL did not enter bypass in the time allotted, or
 * DPLL3 was passed in, or the DPLL does not support low-power bypass,
 * return -EINVAL.
 */
static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
{
        int r;
        u8 ai;

        if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
                return -EINVAL;

        pr_debug("clock: configuring DPLL %s for low-power bypass\n",
                 clk_hw_get_name(&clk->hw));

        ai = omap3_dpll_autoidle_read(clk);

        _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);

        r = _omap3_wait_dpll_status(clk, 0);

        if (ai)
                omap3_dpll_allow_idle(clk);

        return r;
}

/*
 * _omap3_noncore_dpll_stop - instruct a DPLL to stop
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power stop. Will save and
 * restore the DPLL's autoidle state across the stop, per the CDP
 * code. If DPLL3 was passed in, or the DPLL does not support
 * low-power stop, return -EINVAL; otherwise, return 0.
 */
static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk)
{
        u8 ai;

        if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
                return -EINVAL;

        pr_debug("clock: stopping DPLL %s\n", clk_hw_get_name(&clk->hw));

        ai = omap3_dpll_autoidle_read(clk);

        _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);

        if (ai)
                omap3_dpll_allow_idle(clk);

        return 0;
}

/**
 * _lookup_dco - Lookup DCO used by j-type DPLL
 * @clk: pointer to a DPLL struct clk
 * @dco: digital control oscillator selector
 * @m: DPLL multiplier to set
 * @n: DPLL divider to set
 *
 * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
 *
 * XXX This code is not needed for 3430/AM35xx; can it be optimized
 * out in non-multi-OMAP builds for those chips?
 */
static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n)
{
        unsigned long fint, clkinp; /* watch out for overflow */

        clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
        fint = (clkinp / n) * m;

        if (fint < 1000000000)
                *dco = 2;
        else
                *dco = 4;
}

/**
 * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL
 * @clk: pointer to a DPLL struct clk
 * @sd_div: target sigma-delta divider
 * @m: DPLL multiplier to set
 * @n: DPLL divider to set
 *
 * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
 *
 * XXX This code is not needed for 3430/AM35xx; can it be optimized
 * out in non-multi-OMAP builds for those chips?
 */
static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
{
        unsigned long clkinp, sd; /* watch out for overflow */
        int mod1, mod2;

        clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));

        /*
         * target sigma-delta to near 250MHz
         * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)]
         */
        clkinp /= 100000;       /* shift from MHz to 10*Hz for 38.4 and 19.2 */
        mod1 = (clkinp * m) % (250 * n);
        sd = (clkinp * m) / (250 * n);
        mod2 = sd % 10;
        sd /= 10;

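        /* Round up (take the ceiling) if either division above left a remainder */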
        if (mod1 || mod2)
                sd++;
        *sd_div = sd;
}

/*
 * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
 * @clk: struct clk * of DPLL to set
 * @freqsel: FREQSEL value to set
 *
 * Program the DPLL with the last M, N values calculated, and wait for
 * the DPLL to lock. Returns -EINVAL upon error, or 0 upon success.
 */
static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
{
        struct dpll_data *dd = clk->dpll_data;
        u8 dco, sd_div, ai = 0;
        u32 v;
        bool errata_i810;

        /* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
        _omap3_noncore_dpll_bypass(clk);

        /*
         * Set jitter correction. Jitter correction is applicable to OMAP343X
         * only, since the freqsel field is no longer present on other devices.
         */
        if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
                v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
                v &= ~dd->freqsel_mask;
                v |= freqsel << __ffs(dd->freqsel_mask);
                ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
        }

        /* Set DPLL multiplier, divider */
        v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);

        /* Handle Duty Cycle Correction */
        if (dd->dcc_mask) {
                if (dd->last_rounded_rate >= dd->dcc_rate)
                        v |= dd->dcc_mask; /* Enable DCC */
                else
                        v &= ~dd->dcc_mask; /* Disable DCC */
        }

        v &= ~(dd->mult_mask | dd->div1_mask);
        v |= dd->last_rounded_m << __ffs(dd->mult_mask);
        v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);

        /* Configure dco and sd_div for dplls that have these fields */
        if (dd->dco_mask) {
                _lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
                v &= ~(dd->dco_mask);
                v |= dco << __ffs(dd->dco_mask);
        }
        if (dd->sddiv_mask) {
                _lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
                              dd->last_rounded_n);
                v &= ~(dd->sddiv_mask);
                v |= sd_div << __ffs(dd->sddiv_mask);
        }

        /*
         * Errata i810 - the DPLL controller can get stuck while transitioning
         * to a power-saving state. Software must ensure the DPLL cannot
         * transition to a low-power state while changing the M/N values.
         * The easiest way to accomplish this is to prevent DPLL autoidle
         * before doing the M/N re-program.
         */
        errata_i810 = ti_clk_get_features()->flags & TI_CLK_ERRATA_I810;

        if (errata_i810) {
                ai = omap3_dpll_autoidle_read(clk);
                if (ai) {
                        omap3_dpll_deny_idle(clk);

                        /* OCP barrier */
                        omap3_dpll_autoidle_read(clk);
                }
        }

        ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);

        /* Set 4X multiplier and low-power mode */
        if (dd->m4xen_mask || dd->lpmode_mask) {
                v = ti_clk_ll_ops->clk_readl(&dd->control_reg);

                if (dd->m4xen_mask) {
                        if (dd->last_rounded_m4xen)
                                v |= dd->m4xen_mask;
                        else
                                v &= ~dd->m4xen_mask;
                }

                if (dd->lpmode_mask) {
                        if (dd->last_rounded_lpmode)
                                v |= dd->lpmode_mask;
                        else
                                v &= ~dd->lpmode_mask;
                }

                ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
        }

        /* We let the clock framework set the other output dividers later */

        /* REVISIT: Set ramp-up delay? */

        _omap3_noncore_dpll_lock(clk);

        if (errata_i810 && ai)
                omap3_dpll_allow_idle(clk);

        return 0;
}

/* Public functions */

/**
 * omap3_dpll_recalc - recalculate DPLL rate
 * @hw: struct clk_hw of the DPLL to recalculate the rate for
 * @parent_rate: clock rate of the DPLL's parent
 *
 * Recalculate and propagate the DPLL rate.
 */
unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
{
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);

        return omap2_get_dpll_rate(clk);
}

/* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */

/**
 * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
 * @hw: struct clk_hw of the DPLL to enable
 *
 * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
 * The choice of modes depends on the DPLL's programmed rate: if it is
 * the same as the DPLL's bypass clock, it will enter bypass;
 * otherwise, it will enter lock. This code will wait for the DPLL to
 * indicate readiness before returning, unless the DPLL takes too long
 * to enter the target state. Intended to be used as the struct clk's
 * enable function. If DPLL3 was passed in, or the DPLL does not
 * support low-power bypass, or if the DPLL took too long to enter
 * bypass or lock, return -EINVAL; otherwise, return 0.
 */
int omap3_noncore_dpll_enable(struct clk_hw *hw)
{
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        int r;
        struct dpll_data *dd;
        struct clk_hw *parent;

        dd = clk->dpll_data;
        if (!dd)
                return -EINVAL;

        if (clk->clkdm) {
                r = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
                if (r) {
                        WARN(1,
                             "%s: could not enable %s's clockdomain %s: %d\n",
                             __func__, clk_hw_get_name(hw),
                             clk->clkdm_name, r);
                        return r;
                }
        }

        parent = clk_hw_get_parent(hw);

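        /*
         * A rate equal to the bypass clock rate selects bypass mode;
         * any other rate selects lock mode.
         */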
        if (clk_hw_get_rate(hw) == clk_hw_get_rate(dd->clk_bypass)) {
                WARN_ON(parent != dd->clk_bypass);
                r = _omap3_noncore_dpll_bypass(clk);
        } else {
                WARN_ON(parent != dd->clk_ref);
                r = _omap3_noncore_dpll_lock(clk);
        }

        return r;
}

/**
 * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
 * @hw: struct clk_hw of the DPLL to disable
 *
 * Instructs a non-CORE DPLL to enter low-power stop. This function is
 * intended for use in struct clkops. No return value.
 */
void omap3_noncore_dpll_disable(struct clk_hw *hw)
{
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);

        _omap3_noncore_dpll_stop(clk);
        if (clk->clkdm)
                ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
}

/* Non-CORE DPLL rate set code */

/**
 * omap3_noncore_dpll_determine_rate - determine rate for a DPLL
 * @hw: pointer to the clock to determine rate for
 * @req: target rate request
 *
 * Determines which DPLL mode to use for reaching a desired target rate.
 * Checks whether the DPLL shall be in bypass or locked mode, and if
 * locked, calculates the M,N values for the DPLL via round-rate.
 * Returns 0 on success, or a negative error value on failure.
 */
int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
                                      struct clk_rate_request *req)
{
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        struct dpll_data *dd;

        if (!req->rate)
                return -EINVAL;

        dd = clk->dpll_data;
        if (!dd)
                return -EINVAL;

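        /*
         * Use the bypass parent when the requested rate matches the bypass
         * clock and low-power bypass is supported; otherwise round the rate
         * against the reference clock.
         */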
        if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
            (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
                req->best_parent_hw = dd->clk_bypass;
        } else {
                req->rate = omap2_dpll_round_rate(hw, req->rate,
                                                  &req->best_parent_rate);
                req->best_parent_hw = dd->clk_ref;
        }

        req->best_parent_rate = req->rate;

        return 0;
}

/**
 * omap3_noncore_dpll_set_parent - set parent for a DPLL clock
 * @hw: pointer to the clock to set parent for
 * @index: parent index to select
 *
 * Sets parent for a DPLL clock. This sets the DPLL into bypass or
 * locked mode. Returns 0 on success, negative error value otherwise.
 */
int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index)
{
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        int ret;

        if (!hw)
                return -EINVAL;

        if (index)
                ret = _omap3_noncore_dpll_bypass(clk);
        else
                ret = _omap3_noncore_dpll_lock(clk);

        return ret;
}

/**
 * omap3_noncore_dpll_set_rate - set rate for a DPLL clock
 * @hw: pointer to the clock to set rate for
 * @rate: target rate for the clock
 * @parent_rate: rate of the parent clock
 *
 * Sets rate for a DPLL clock. First checks that the clock's parent is
 * the reference clock (the rate cannot be changed while the DPLL is in
 * bypass mode), then proceeds with the rate change operation. Returns
 * 0 on success, negative error value otherwise.
 */
int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
                                unsigned long parent_rate)
{
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        struct dpll_data *dd;
        u16 freqsel = 0;
        int ret;

        if (!hw || !rate)
                return -EINVAL;

        dd = clk->dpll_data;
        if (!dd)
                return -EINVAL;

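        /* The rate can only be changed when parented to the reference clock */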
        if (clk_hw_get_parent(hw) != dd->clk_ref)
                return -EINVAL;

        if (dd->last_rounded_rate == 0)
                return -EINVAL;

        /* Freqsel is available only on OMAP343X devices */
        if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
                freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
                WARN_ON(!freqsel);
        }

        pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__,
                 clk_hw_get_name(hw), rate);

        ret = omap3_noncore_dpll_program(clk, freqsel);

        return ret;
}

/**
 * omap3_noncore_dpll_set_rate_and_parent - set rate and parent for a DPLL clock
 * @hw: pointer to the clock to set rate and parent for
 * @rate: target rate for the DPLL
 * @parent_rate: clock rate of the DPLL parent
 * @index: new parent index for the DPLL, 0 - reference, 1 - bypass
 *
 * Sets rate and parent for a DPLL clock. If the new parent is the bypass
 * clock, only the parent is selected. Otherwise this proceeds with a rate
 * change, which effectively also changes the parent, as the DPLL is put
 * into locked mode. Returns 0 on success, negative error value otherwise.
 */
int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
                                           unsigned long rate,
                                           unsigned long parent_rate,
                                           u8 index)
{
        int ret;

        if (!hw || !rate)
                return -EINVAL;

        /*
         * clk-ref is at index 0, in which case we only need to set the rate;
         * the parent is changed automatically by the lock sequence. In the
         * clk-bypass case (index 1) we only need to change the parent.
         */
        if (index)
                ret = omap3_noncore_dpll_set_parent(hw, index);
        else
                ret = omap3_noncore_dpll_set_rate(hw, rate, parent_rate);

        return ret;
}

/* DPLL autoidle read/set code */

/**
 * omap3_dpll_autoidle_read - read a DPLL's autoidle bits
 * @clk: struct clk * of the DPLL to read
 *
 * Return the DPLL's autoidle bits, shifted down to bit 0. Returns
 * -EINVAL if passed a null pointer or if the struct clk does not
 * appear to refer to a DPLL.
 */
static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk)
{
        const struct dpll_data *dd;
        u32 v;

        if (!clk || !clk->dpll_data)
                return -EINVAL;

        dd = clk->dpll_data;

        if (!dd->autoidle_mask)
                return -EINVAL;

        v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
        v &= dd->autoidle_mask;
        v >>= __ffs(dd->autoidle_mask);

        return v;
}

/**
 * omap3_dpll_allow_idle - enable DPLL autoidle bits
 * @clk: struct clk * of the DPLL to operate on
 *
 * Enable DPLL automatic idle control. This automatic idle mode
 * switching takes effect only when the DPLL is locked, at least on
 * OMAP3430. The DPLL will enter low-power stop when its downstream
 * clocks are gated. No return value.
 */
static void omap3_dpll_allow_idle(struct clk_hw_omap *clk)
{
        const struct dpll_data *dd;
        u32 v;

        if (!clk || !clk->dpll_data)
                return;

        dd = clk->dpll_data;

        if (!dd->autoidle_mask)
                return;

        /*
         * REVISIT: CORE DPLL can optionally enter low-power bypass
         * by writing 0x5 instead of 0x1. Add some mechanism to
         * optionally enter this mode.
         */
        v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
        v &= ~dd->autoidle_mask;
        v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
        ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
}

/**
 * omap3_dpll_deny_idle - prevent DPLL from automatically idling
 * @clk: struct clk * of the DPLL to operate on
 *
 * Disable DPLL automatic idle control. No return value.
 */
static void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
{
        const struct dpll_data *dd;
        u32 v;

        if (!clk || !clk->dpll_data)
                return;

        dd = clk->dpll_data;

        if (!dd->autoidle_mask)
                return;

        v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
        v &= ~dd->autoidle_mask;
        v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
        ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
}

/* Clock control for DPLL outputs */

/* Find the parent DPLL for the given clkoutx2 clock */
static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
{
        struct clk_hw_omap *pclk = NULL;

        /* Walk up the parents of clk, looking for a DPLL */
        do {
                do {
                        hw = clk_hw_get_parent(hw);
                } while (hw && (!omap2_clk_is_hw_omap(hw)));
                if (!hw)
                        break;
                pclk = to_clk_hw_omap(hw);
        } while (pclk && !pclk->dpll_data);

        /* clk does not have a DPLL as a parent? error in the clock data */
        if (!pclk) {
                WARN_ON(1);
                return NULL;
        }

        return pclk;
}

/**
 * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
 * @hw: pointer to the clkoutx2 struct clk_hw
 * @parent_rate: rate of the parent DPLL clock
 *
 * Using parent clock DPLL data, look up DPLL state. If locked, set our
 * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
 */
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
                                    unsigned long parent_rate)
{
        const struct dpll_data *dd;
        unsigned long rate;
        u32 v;
        struct clk_hw_omap *pclk = NULL;

        if (!parent_rate)
                return 0;

        pclk = omap3_find_clkoutx2_dpll(hw);

        if (!pclk)
                return 0;

        dd = pclk->dpll_data;

        WARN_ON(!dd->enable_mask);

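        /* x2 only applies when the DPLL is locked and is not a J-type DPLL */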
        v = ti_clk_ll_ops->clk_readl(&dd->control_reg) & dd->enable_mask;
        v >>= __ffs(dd->enable_mask);
        if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
                rate = parent_rate;
        else
                rate = parent_rate * 2;
        return rate;
}

/**
 * omap3_core_dpll_save_context - save the M and N values of the divider
 * @hw: pointer to a struct clk_hw
 *
 * Before the DPLL registers are lost, save the last rounded M and N
 * values and the DPLL's enable state.
 */
int omap3_core_dpll_save_context(struct clk_hw *hw)
{
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        struct dpll_data *dd;
        u32 v;

        dd = clk->dpll_data;

        v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
        clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);

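        /* M and N only need to be saved if the DPLL was locked */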
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (clk->context == DPLL_LOCKED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) dd->last_rounded_m = (v & dd->mult_mask) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) __ffs(dd->mult_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) dd->last_rounded_n = ((v & dd->div1_mask) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) __ffs(dd->div1_mask)) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
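
/*
 * Illustrative sketch, not part of the original driver: both save hooks
 * in this file extract bitfields with "(reg & mask) >> __ffs(mask)" and
 * store the divider as the register value plus one.  The helper name and
 * the numbers in the comment are assumptions for this example only.
 */
static inline u32 __maybe_unused omap3_dpll_field_example(u32 reg, u32 mask)
{
	/*
	 * e.g. reg = 0x2590c, mask = 0x7ff00:
	 * (0x2590c & 0x7ff00) >> __ffs(0x7ff00) = 0x25900 >> 8 = 0x259
	 */
	return (reg & mask) >> __ffs(mask);
}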
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * omap3_core_dpll_restore_context - restore the CORE DPLL M and N values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * @hw: pointer to struct clk_hw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * Restore the last rounded M and N values and the saved enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * (clken) state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) void omap3_core_dpll_restore_context(struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) struct clk_hw_omap *clk = to_clk_hw_omap(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) const struct dpll_data *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) u32 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) dd = clk->dpll_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (clk->context == DPLL_LOCKED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) _omap3_dpll_write_clken(clk, 0x4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) _omap3_wait_dpll_status(clk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) v &= ~(dd->mult_mask | dd->div1_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) v |= dd->last_rounded_m << __ffs(dd->mult_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) _omap3_dpll_write_clken(clk, DPLL_LOCKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) _omap3_wait_dpll_status(clk, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) _omap3_dpll_write_clken(clk, clk->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
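
/*
 * Illustrative sketch, not part of the original driver: the restore path
 * above rebuilds the M/N register value by clearing both fields and then
 * writing M and N - 1 at the offsets given by their masks.  The helper
 * below restates that composition in isolation; its name is an assumption
 * for this example only.
 */
static inline u32 __maybe_unused omap3_dpll_compose_mn_example(u32 v,
							       u32 mult_mask,
							       u32 div1_mask,
							       u16 m, u8 n)
{
	v &= ~(mult_mask | div1_mask);
	v |= m << __ffs(mult_mask);		/* multiplier M */
	v |= (u32)(n - 1) << __ffs(div1_mask);	/* divider stored as N - 1 */

	return v;
}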
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * omap3_noncore_dpll_save_context - save the non-CORE DPLL M and N values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * @hw: pointer to struct clk_hw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * Before the DPLL registers are lost, save the last rounded M and N
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * values and the current enable (clken) state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) int omap3_noncore_dpll_save_context(struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct clk_hw_omap *clk = to_clk_hw_omap(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) struct dpll_data *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) u32 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) dd = clk->dpll_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (clk->context == DPLL_LOCKED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) dd->last_rounded_m = (v & dd->mult_mask) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) __ffs(dd->mult_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) dd->last_rounded_n = ((v & dd->div1_mask) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) __ffs(dd->div1_mask)) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * omap3_noncore_dpll_restore_context - restore the non-CORE DPLL M and N values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * @hw: pointer to struct clk_hw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * Restore the last rounded M and N values and the saved enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * (clken) state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) void omap3_noncore_dpll_restore_context(struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) struct clk_hw_omap *clk = to_clk_hw_omap(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) const struct dpll_data *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) u32 ctrl, mult_div1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) dd = clk->dpll_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) ctrl = ti_clk_ll_ops->clk_readl(&dd->control_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) mult_div1 = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (clk->context == ((ctrl & dd->enable_mask) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) __ffs(dd->enable_mask)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) dd->last_rounded_m == ((mult_div1 & dd->mult_mask) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) __ffs(dd->mult_mask)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) dd->last_rounded_n == ((mult_div1 & dd->div1_mask) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) __ffs(dd->div1_mask)) + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /* nothing to be done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (clk->context == DPLL_LOCKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) omap3_noncore_dpll_program(clk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) _omap3_dpll_write_clken(clk, clk->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /* OMAP3/4 non-CORE DPLL clkops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) .allow_idle = omap3_dpll_allow_idle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) .deny_idle = omap3_dpll_deny_idle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * omap3_dpll4_set_rate - set rate for the OMAP3 PER DPLL (DPLL4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * @hw: clock to change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * @rate: target rate for clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * @parent_rate: rate of the parent clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * Check whether the current SoC supports reprogramming the PER DPLL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * and, if so, change its rate. Returns -EINVAL if reprogramming is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * supported, 0 on success, or an error code propagated from the clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * rate change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) int omap3_dpll4_set_rate(struct clk_hw *hw, unsigned long rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) unsigned long parent_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * According to the 12-5 CDP code from TI, "Limitation 2.5"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * on 3430ES1 prevents us from changing DPLL multipliers or dividers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * on DPLL4.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
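
/*
 * Illustrative sketch, not part of the original driver: a consumer-side
 * view of the check above.  On 3430ES1, clk_set_rate() on DPLL4 fails
 * with -EINVAL because of the "Limitation 2.5" guard; later ES revisions
 * go through the normal non-CORE DPLL rate change.  The function name is
 * an assumption for this example only.
 */
static int __maybe_unused omap3_dpll4_set_rate_example(struct clk *dpll4_clk,
						       unsigned long rate)
{
	int ret;

	ret = clk_set_rate(dpll4_clk, rate);
	if (ret == -EINVAL)
		pr_warn("clock: DPLL4 reprogramming not supported on this ES\n");

	return ret;
}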
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * omap3_dpll4_set_rate_and_parent - set rate and parent for the OMAP3 PER DPLL (DPLL4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * @hw: clock to change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * @rate: target rate for clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * @parent_rate: rate of the parent clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * @index: parent index, 0 - reference clock, 1 - bypass clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * Check whether the current SoC supports reprogramming the PER DPLL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * and, if so, change both its rate and its parent. Returns -EINVAL if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * reprogramming is not supported, 0 on success, or an error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * propagated from the clock rate change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) unsigned long parent_rate, u8 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /* Apply DM3730 errata sprz319 advisory 2.1. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) static bool omap3_dpll5_apply_errata(struct clk_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) unsigned long parent_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct omap3_dpll5_settings {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) unsigned int rate, m, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) static const struct omap3_dpll5_settings precomputed[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * From DM3730 errata advisory 2.1, tables 35 and 36.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * The N value is increased by 1 compared to the tables, as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * errata lists raw register values while last_rounded_n holds the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * real divider value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) { 12000000, 80, 0 + 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) { 13000000, 443, 5 + 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) { 19200000, 50, 0 + 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) { 26000000, 443, 11 + 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) { 38400000, 25, 0 + 1 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) const struct omap3_dpll5_settings *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) struct clk_hw_omap *clk = to_clk_hw_omap(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct dpll_data *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) for (i = 0; i < ARRAY_SIZE(precomputed); ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (parent_rate == precomputed[i].rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (i == ARRAY_SIZE(precomputed))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) d = &precomputed[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /* Update the M, N and rounded rate values and program the DPLL. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) dd = clk->dpll_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) dd->last_rounded_m = d->m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) dd->last_rounded_n = d->n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) dd->last_rounded_rate = div_u64((u64)parent_rate * d->m, d->n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) omap3_noncore_dpll_program(clk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
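
/*
 * Illustrative check, not part of the original driver: every row of the
 * precomputed table above targets the 960 MHz DPLL5 rate used for USB
 * host (OMAP3_DPLL5_FREQ_FOR_USBHOST * 8), to within roughly 170 kHz.
 * The 13 MHz row works out as below; the function name is an assumption
 * for this example only.
 */
static inline u64 __maybe_unused omap3_dpll5_errata_rate_example(void)
{
	/* 13 MHz * 443 / 6 = 959833333 Hz, just under the 960 MHz target */
	return div_u64((u64)13000000 * 443, 6);
}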
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * omap3_dpll5_set_rate - set rate for omap3 dpll5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * @hw: clock to change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * @rate: target rate for clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * @parent_rate: rate of the parent clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * Set rate for the DPLL5 clock. Apply the sprz319 advisory 2.1 on OMAP36xx if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * the DPLL is used for USB host (detected through the requested rate).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) unsigned long parent_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (rate == OMAP3_DPLL5_FREQ_FOR_USBHOST * 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (omap3_dpll5_apply_errata(hw, parent_rate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
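
/*
 * Illustrative sketch, not part of the original driver: a consumer would
 * normally reach the errata path above through clk_set_rate() with the
 * USB host rate; any other rate goes through the regular
 * omap3_noncore_dpll_set_rate() path.  The function name is an assumption
 * for this example only.
 */
static int __maybe_unused omap3_dpll5_usbhost_rate_example(struct clk *dpll5_clk)
{
	/* 8 * 120 MHz = 960 MHz: special-cased by omap3_dpll5_set_rate() */
	return clk_set_rate(dpll5_clk, OMAP3_DPLL5_FREQ_FOR_USBHOST * 8);
}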