// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-omap1/clock.c
 *
 * Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation
 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified to use omap shared clock framework by
 * Tony Lindgren <tony@atomide.com>
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>

#include <asm/mach-types.h>

#include <mach/hardware.h>

#include "soc.h"
#include "iomap.h"
#include "clock.h"
#include "opp.h"
#include "sram.h"

__u32 arm_idlect1_mask;
struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

/*
 * Omap1 specific clock functions
 */

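/*
 * omap1_uart_recalc - recalculate the UART functional clock rate
 *
 * The UART functional clock runs at either 48 MHz or 12 MHz depending on
 * whether the clock's select bit in enable_reg is set, so just read it back.
 */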
unsigned long omap1_uart_recalc(struct clk *clk)
{
	unsigned int val = __raw_readl(clk->enable_reg);

	return val & (1 << clk->enable_bit) ? 48000000 : 12000000;
}

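/*
 * omap1_sossi_recalc - recalculate the SoSSI clock rate
 *
 * The SoSSI divider lives in bits 19:17 of MOD_CONF_CTRL_1 and is stored
 * as (divisor - 1), so a field value of 0 means divide-by-1.
 */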
unsigned long omap1_sossi_recalc(struct clk *clk)
{
	u32 div = omap_readl(MOD_CONF_CTRL_1);

	div = (div >> 17) & 0x7;
	div++;

	return clk->parent->rate / div;
}

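/*
 * omap1_clk_allow_idle / omap1_clk_deny_idle - track which ARM_IDLECT1
 * domains may enter idle.  Each clock with CLOCK_IDLE_CONTROL keeps a
 * no_idle_count; its bit is set in arm_idlect1_mask only while that
 * count is zero.
 */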
static void omap1_clk_allow_idle(struct clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
		arm_idlect1_mask |= 1 << iclk->idlect_shift;
}

static void omap1_clk_deny_idle(struct clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count++ == 0)
		arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
}

static __u16 verify_ckctl_value(__u16 newval)
{
	/* This function checks for the following limitations set
	 * by the hardware (all conditions must be true):
	 * DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
	 * ARM_CK >= TC_CK
	 * DSP_CK >= TC_CK
	 * DSPMMU_CK >= TC_CK
	 *
	 * In addition, the following rules are enforced:
	 * LCD_CK <= TC_CK
	 * ARMPER_CK <= TC_CK
	 *
	 * However, maximum frequencies are not checked for!
	 */
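	/*
	 * For example, if the new value asks for ARM_CK at /8 (arm_exp = 3)
	 * while TC_CK is only at /2 (tc_exp = 1), tc_exp (and with it the
	 * LCD and peripheral exponents) is raised to 3 so that ARM_CK >=
	 * TC_CK, LCD_CK <= TC_CK and ARMPER_CK <= TC_CK keep holding.
	 */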
	__u8 per_exp;
	__u8 lcd_exp;
	__u8 arm_exp;
	__u8 dsp_exp;
	__u8 tc_exp;
	__u8 dspmmu_exp;

	per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
	lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
	arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
	dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
	tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
	dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;

	if (dspmmu_exp < dsp_exp)
		dspmmu_exp = dsp_exp;
	if (dspmmu_exp > dsp_exp + 1)
		dspmmu_exp = dsp_exp + 1;
	if (tc_exp < arm_exp)
		tc_exp = arm_exp;
	if (tc_exp < dspmmu_exp)
		tc_exp = dspmmu_exp;
	if (tc_exp > lcd_exp)
		lcd_exp = tc_exp;
	if (tc_exp > per_exp)
		per_exp = tc_exp;

	newval &= 0xf000;
	newval |= per_exp << CKCTL_PERDIV_OFFSET;
	newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
	newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
	newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
	newval |= tc_exp << CKCTL_TCDIV_OFFSET;
	newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;

	return newval;
}

static int calc_dsor_exp(struct clk *clk, unsigned long rate)
{
	/* Note: If the target frequency is too low, this function will
	 * return 4, which is an invalid value. The caller must check for
	 * this value and act accordingly.
	 *
	 * Note: This function does not check for the following limitations
	 * set by the hardware (all conditions must be true):
	 * DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
	 * ARM_CK >= TC_CK
	 * DSP_CK >= TC_CK
	 * DSPMMU_CK >= TC_CK
	 */
	unsigned long realrate;
	struct clk *parent;
	unsigned dsor_exp;

	parent = clk->parent;
	if (unlikely(parent == NULL))
		return -EIO;

	realrate = parent->rate;
	for (dsor_exp = 0; dsor_exp < 4; dsor_exp++) {
		if (realrate <= rate)
			break;

		realrate /= 2;
	}

	return dsor_exp;
}

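/*
 * omap1_ckctl_recalc - recalculate a clock derived from an ARM_CKCTL divider
 *
 * Each CKCTL divider field holds a 2-bit exponent, so the parent rate is
 * divided by 1, 2, 4 or 8 (a field value of 3 selects divide-by-8).
 */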
unsigned long omap1_ckctl_recalc(struct clk *clk)
{
	/* Calculate divisor encoded as 2-bit exponent */
	int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));

	return clk->parent->rate / dsor;
}

unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
{
	int dsor;

	/* Calculate divisor encoded as 2-bit exponent
	 *
	 * The clock control bits are in DSP domain,
	 * so api_ck is needed for access.
	 * Note that DSP_CKCTL virt addr = phys addr, so
	 * we must use __raw_readw() instead of omap_readw().
	 */
	omap1_clk_enable(api_ck_p);
	dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
	omap1_clk_disable(api_ck_p);

	return clk->parent->rate / dsor;
}

/* MPU virtual clock functions */
int omap1_select_table_rate(struct clk *clk, unsigned long rate)
{
	/* Find the highest supported frequency <= rate and switch to it */
	struct mpu_rate *ptr;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		/* Can check only after xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	if (!ptr->rate)
		return -EINVAL;

	/*
	 * In most cases we should not need to reprogram DPLL.
	 * Reprogramming the DPLL is tricky, it must be done from SRAM.
	 */
	omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);

	/* XXX Do we need to recalculate the tree below DPLL1 at this point? */
	ck_dpll1_p->rate = ptr->pll_rate;

	return 0;
}

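/*
 * omap1_clk_set_rate_dsp_domain - set the rate of a DSP-domain CKCTL clock
 *
 * Programs the clock's 2-bit divider exponent in DSP_CKCTL and caches the
 * resulting rate.  Rates below parent/8 are rejected with -EINVAL.
 */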
int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(clk, rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = __raw_readw(DSP_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	__raw_writew(regval, DSP_CKCTL);
	clk->rate = clk->parent->rate / (1 << dsor_exp);

	return 0;
}

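/*
 * omap1_clk_round_rate_ckctl_arm - round a rate to what a CKCTL divider
 * can produce: the highest achievable rate not above the requested one,
 * with requests below parent/8 rounded up to parent/8.
 */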
long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
	int dsor_exp = calc_dsor_exp(clk, rate);

	if (dsor_exp < 0)
		return dsor_exp;
	if (dsor_exp > 3)
		dsor_exp = 3;

	return clk->parent->rate / (1 << dsor_exp);
}

int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(clk, rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = omap_readw(ARM_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	regval = verify_ckctl_value(regval);
	omap_writew(regval, ARM_CKCTL);
	clk->rate = clk->parent->rate / (1 << dsor_exp);
	return 0;
}

long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
{
	/* Find the highest supported frequency <= rate */
	struct mpu_rate *ptr;
	long highest_rate;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	highest_rate = -EINVAL;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		highest_rate = ptr->rate;

		/* Can check only after xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	return highest_rate;
}

static unsigned calc_ext_dsor(unsigned long rate)
{
	unsigned dsor;

	/* MCLK and BCLK divisor selection is not linear:
	 * freq = 96MHz / dsor
	 *
	 * RATIO_SEL range: dsor <-> RATIO_SEL
	 * 0..6: (RATIO_SEL+2) <-> (dsor-2)
	 * 6..48: (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6)
	 * Minimum dsor is 2 and maximum is 96. Odd divisors starting
	 * from 9 cannot be used.
	 */
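	/*
	 * For example, RATIO_SEL 0 selects dsor 2 (48 MHz), RATIO_SEL 6
	 * selects dsor 8 (12 MHz) and RATIO_SEL 7 selects dsor 10 (9.6 MHz).
	 */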
	for (dsor = 2; dsor < 96; ++dsor) {
		if ((dsor & 1) && dsor > 8)
			continue;
		if (rate >= 96000000 / dsor)
			break;
	}
	return dsor;
}

/* XXX Only needed on 1510 */
int omap1_set_uart_rate(struct clk *clk, unsigned long rate)
{
	unsigned int val;

	val = __raw_readl(clk->enable_reg);
	if (rate == 12000000)
		val &= ~(1 << clk->enable_bit);
	else if (rate == 48000000)
		val |= (1 << clk->enable_bit);
	else
		return -EINVAL;
	__raw_writel(val, clk->enable_reg);
	clk->rate = rate;

	return 0;
}

/* External clock (MCLK & BCLK) functions */
int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	unsigned dsor;
	__u16 ratio_bits;

	dsor = calc_ext_dsor(rate);
	clk->rate = 96000000 / dsor;
	if (dsor > 8)
		ratio_bits = ((dsor - 8) / 2 + 6) << 2;
	else
		ratio_bits = (dsor - 2) << 2;

	ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
	__raw_writew(ratio_bits, clk->enable_reg);

	return 0;
}

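/*
 * omap1_set_sossi_rate - set the SoSSI clock rate
 *
 * Picks the smallest divider (1..8) of the parent clock whose resulting
 * rate does not exceed the requested rate and programs it, minus one,
 * into bits 19:17 of MOD_CONF_CTRL_1.
 */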
int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
{
	u32 l;
	int div;
	unsigned long p_rate;

	p_rate = clk->parent->rate;
	/* Round towards slower frequency */
	div = (p_rate + rate - 1) / rate;
	div--;
	if (div < 0 || div > 7)
		return -EINVAL;

	l = omap_readl(MOD_CONF_CTRL_1);
	l &= ~(7 << 17);
	l |= div << 17;
	omap_writel(l, MOD_CONF_CTRL_1);

	clk->rate = p_rate / (div + 1);

	return 0;
}

long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	return 96000000 / calc_ext_dsor(rate);
}

void omap1_init_ext_clk(struct clk *clk)
{
	unsigned dsor;
	__u16 ratio_bits;

	/* Determine current rate and ensure clock is based on 96MHz APLL */
	ratio_bits = __raw_readw(clk->enable_reg) & ~1;
	__raw_writew(ratio_bits, clk->enable_reg);

	ratio_bits = (ratio_bits & 0xfc) >> 2;
	if (ratio_bits > 6)
		dsor = (ratio_bits - 6) * 2 + 8;
	else
		dsor = ratio_bits + 2;

	clk->rate = 96000000 / dsor;
}

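/*
 * omap1_clk_enable - enable a clock and, recursively, its parent
 *
 * Reference counted: the hardware enable is only performed on the 0 -> 1
 * usecount transition.  Parents of clocks marked CLOCK_NO_IDLE_PARENT are
 * also denied idle while the child is in use.
 */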
int omap1_clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = omap1_clk_enable(clk->parent);
			if (ret)
				goto err;

			if (clk->flags & CLOCK_NO_IDLE_PARENT)
				omap1_clk_deny_idle(clk->parent);
		}

		ret = clk->ops->enable(clk);
		if (ret) {
			if (clk->parent)
				omap1_clk_disable(clk->parent);
			goto err;
		}
	}
	return ret;

err:
	clk->usecount--;
	return ret;
}

void omap1_clk_disable(struct clk *clk)
{
	if (clk->usecount > 0 && !(--clk->usecount)) {
		clk->ops->disable(clk);
		if (likely(clk->parent)) {
			omap1_clk_disable(clk->parent);
			if (clk->flags & CLOCK_NO_IDLE_PARENT)
				omap1_clk_allow_idle(clk->parent);
		}
	}
}

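/*
 * Generic clkops: the clock is gated by a single enable bit in a 16- or
 * 32-bit register (the ENABLE_REG_32BIT flag selects the access width).
 */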
static int omap1_clk_enable_generic(struct clk *clk)
{
	__u16 regval16;
	__u32 regval32;

	if (unlikely(clk->enable_reg == NULL)) {
		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
		       clk->name);
		return -EINVAL;
	}

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 |= (1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 |= (1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}

	return 0;
}

static void omap1_clk_disable_generic(struct clk *clk)
{
	__u16 regval16;
	__u32 regval32;

	if (clk->enable_reg == NULL)
		return;

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 &= ~(1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 &= ~(1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}
}

const struct clkops clkops_generic = {
	.enable		= omap1_clk_enable_generic,
	.disable	= omap1_clk_disable_generic,
};

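/*
 * DSP-domain clkops: the DSP-side enable registers are only accessible
 * while api_ck is running, so it is enabled around each generic
 * enable/disable.
 */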
static int omap1_clk_enable_dsp_domain(struct clk *clk)
{
	int retval;

	retval = omap1_clk_enable(api_ck_p);
	if (!retval) {
		retval = omap1_clk_enable_generic(clk);
		omap1_clk_disable(api_ck_p);
	}

	return retval;
}

static void omap1_clk_disable_dsp_domain(struct clk *clk)
{
	if (omap1_clk_enable(api_ck_p) == 0) {
		omap1_clk_disable_generic(clk);
		omap1_clk_disable(api_ck_p);
	}
}

const struct clkops clkops_dspck = {
	.enable		= omap1_clk_enable_dsp_domain,
	.disable	= omap1_clk_disable_dsp_domain,
};

/* XXX SYSC register handling does not belong in the clock framework */
static int omap1_clk_enable_uart_functional_16xx(struct clk *clk)
{
	int ret;
	struct uart_clk *uclk;

	ret = omap1_clk_enable_generic(clk);
	if (ret == 0) {
		/* Set smart idle acknowledgement mode */
		uclk = (struct uart_clk *)clk;
		omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
			    uclk->sysc_addr);
	}

	return ret;
}

/* XXX SYSC register handling does not belong in the clock framework */
static void omap1_clk_disable_uart_functional_16xx(struct clk *clk)
{
	struct uart_clk *uclk;

	/* Set force idle acknowledgement mode */
	uclk = (struct uart_clk *)clk;
	omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);

	omap1_clk_disable_generic(clk);
}

/* XXX SYSC register handling does not belong in the clock framework */
const struct clkops clkops_uart_16xx = {
	.enable		= omap1_clk_enable_uart_functional_16xx,
	.disable	= omap1_clk_disable_uart_functional_16xx,
};

long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk->round_rate != NULL)
		return clk->round_rate(clk, rate);

	return clk->rate;
}

int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EINVAL;

	if (clk->set_rate)
		ret = clk->set_rate(clk, rate);

	return ret;
}

/*
 * Omap1 clock reset and init functions
 */

#ifdef CONFIG_OMAP_RESET_CLOCKS

void omap1_clk_disable_unused(struct clk *clk)
{
	__u32 regval32;

	/*
	 * Clocks in the DSP domain need api_ck.  Just assume the bootloader
	 * has not enabled any DSP clocks.
	 */
	if (clk->enable_reg == DSP_IDLECT2) {
		pr_info("Skipping reset check for DSP domain clock \"%s\"\n",
			clk->name);
		return;
	}

	/* Is the clock already disabled? */
	if (clk->flags & ENABLE_REG_32BIT)
		regval32 = __raw_readl(clk->enable_reg);
	else
		regval32 = __raw_readw(clk->enable_reg);

	if ((regval32 & (1 << clk->enable_bit)) == 0)
		return;

	printk(KERN_INFO "Disabling unused clock \"%s\"... ", clk->name);
	clk->ops->disable(clk);
	printk(" done\n");
}

#endif

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->usecount == 0) {
		pr_err("Trying to disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		goto out;
	}

	omap1_clk_disable(clk);

out:
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long flags;
	unsigned long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = clk->rate;
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_get_rate);

/*
 * Optional clock functions defined in include/linux/clk.h
 */

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_round_rate(clk, rate);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_set_rate(clk, rate);
	if (ret == 0)
		propagate_rate(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	WARN_ONCE(1, "clk_set_parent() not implemented for OMAP1\n");

	return -EINVAL;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);

/*
 * OMAP specific clock functions shared between omap1 and omap2
 */

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent->rate;
}

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
unsigned long omap_fixed_divisor_recalc(struct clk *clk)
{
	WARN_ON(!clk->fixed_div);

	return clk->parent->rate / clk->fixed_div;
}

void clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/*
	 * Now do the debugfs renaming to reattach the child
	 * to the proper parent.
	 */
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

/**
 * clk_preinit - initialize any fields in the struct clk before clk init
 * @clk: struct clk * to initialize
 *
 * Initialize any struct clk fields needed before normal clk initialization
 * can run. No return value.
 */
void clk_preinit(struct clk *clk)
{
	INIT_LIST_HEAD(&clk->children);
}

int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clocks_mutex);
	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clocks);
	if (clk->init)
		clk->init(clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

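/*
 * clk_enable_init_clocks - take an initial use count on every clock
 * flagged ENABLE_ON_INIT, so those clocks come up enabled.
 */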
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) void clk_enable_init_clocks(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct clk *clkp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) list_for_each_entry(clkp, &clocks, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (clkp->flags & ENABLE_ON_INIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) clk_enable(clkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * omap_clk_get_by_name - locate OMAP struct clk by its name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * @name: name of the struct clk to locate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * Locate an OMAP struct clk by its name. Assumes that struct clk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * names are unique. Returns NULL if not found or a pointer to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * struct clk if found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct clk *omap_clk_get_by_name(const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) struct clk *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) struct clk *ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) mutex_lock(&clocks_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) list_for_each_entry(c, &clocks, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (!strcmp(c->name, name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) ret = c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) mutex_unlock(&clocks_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
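/**
 * omap_clk_enable_autoidle_all - allow hardware autoidle for all clocks
 *
 * Walks the global clock list under clockfw_lock and calls the
 * allow_idle clkops hook for every clock that implements it.
 * Always returns 0.
 */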
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) int omap_clk_enable_autoidle_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct clk *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) spin_lock_irqsave(&clockfw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) list_for_each_entry(c, &clocks, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (c->ops->allow_idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) c->ops->allow_idle(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) spin_unlock_irqrestore(&clockfw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
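/**
 * omap_clk_disable_autoidle_all - deny hardware autoidle for all clocks
 *
 * Walks the global clock list under clockfw_lock and calls the
 * deny_idle clkops hook for every clock that implements it.
 * Always returns 0.
 */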
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) int omap_clk_disable_autoidle_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) struct clk *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) spin_lock_irqsave(&clockfw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) list_for_each_entry(c, &clocks, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (c->ops->deny_idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) c->ops->deny_idle(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) spin_unlock_irqrestore(&clockfw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * Low level helpers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) static int clkll_enable_null(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) static void clkll_disable_null(struct clk *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
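/*
 * clkops_null: clock operations with no hardware side effects;
 * enable always succeeds and disable does nothing.
 */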
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) const struct clkops clkops_null = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) .enable = clkll_enable_null,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) .disable = clkll_disable_null,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * Dummy clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * Used for clock aliases that are needed on some OMAPs, but not others
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) struct clk dummy_ck = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) .name = "dummy",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) .ops = &clkops_null,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) #ifdef CONFIG_OMAP_RESET_CLOCKS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * Disable any unused clocks left on by the bootloader
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) static int __init clk_disable_unused(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct clk *ck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) pr_info("clock: disabling unused clocks to save power\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) spin_lock_irqsave(&clockfw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) list_for_each_entry(ck, &clocks, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (ck->ops == &clkops_null)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (ck->usecount > 0 || !ck->enable_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) omap1_clk_disable_unused(ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) spin_unlock_irqrestore(&clockfw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) late_initcall(clk_disable_unused);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) late_initcall(omap_clk_enable_autoidle_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * debugfs support to trace clock tree hierarchy and attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) #include <linux/seq_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) static struct dentry *clk_debugfs_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
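/*
 * debug_clock_show: show callback behind the debugfs "summary" file.
 * Prints one line per registered clock (name, parent name, rate and
 * use count) while holding clocks_mutex to keep the list stable.
 */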
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) static int debug_clock_show(struct seq_file *s, void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct clk *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct clk *pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) mutex_lock(&clocks_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) seq_printf(s, "%-30s %-30s %-10s %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) "clock-name", "parent-name", "rate", "use-count");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) list_for_each_entry(c, &clocks, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) pa = c->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) seq_printf(s, "%-30s %-30s %-10lu %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) c->name, pa ? pa->name : "none", c->rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) c->usecount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) mutex_unlock(&clocks_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) DEFINE_SHOW_ATTRIBUTE(debug_clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
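/*
 * Create the debugfs directory for a single clock, nested under its
 * parent's directory if it has a parent, and expose read-only
 * usecount, rate and flags entries inside it.
 */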
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) static void clk_debugfs_register_one(struct clk *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) struct dentry *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) struct clk *pa = c->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) c->dent = d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) debugfs_create_u8("usecount", 0444, c->dent, &c->usecount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) debugfs_create_ulong("rate", 0444, c->dent, &c->rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) debugfs_create_x8("flags", 0444, c->dent, &c->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
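/*
 * Register a clock in debugfs, first recursing up the parent chain so
 * that the directory tree mirrors the clock hierarchy.
 */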
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) static void clk_debugfs_register(struct clk *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct clk *pa = c->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (pa && !pa->dent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) clk_debugfs_register(pa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (!c->dent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) clk_debugfs_register_one(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
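/*
 * Build the debugfs "clock" tree at late_initcall time: one directory
 * per registered clock plus a "summary" file listing them all.
 */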
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) static int __init clk_debugfs_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) struct clk *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) struct dentry *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) d = debugfs_create_dir("clock", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) clk_debugfs_root = d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) list_for_each_entry(c, &clocks, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) clk_debugfs_register(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) debugfs_create_file("summary", 0444, d, NULL, &debug_clock_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) late_initcall(clk_debugfs_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) #endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */