/*
 * TI clock autoidle support
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 *
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>

#include "clock.h"

/**
 * struct clk_ti_autoidle - autoidle bit descriptor for a DT clock
 * @reg: register containing the autoidle bit
 * @shift: bit shift of the autoidle bit within @reg
 * @flags: autoidle flags (AUTOIDLE_LOW if the bit is active-low)
 * @name: clock name, taken from the device tree node
 * @node: list node for the autoidle_clks list
 */
struct clk_ti_autoidle {
	struct clk_omap_reg	reg;
	u8			shift;
	u8			flags;
	const char		*name;
	struct list_head	node;
};

/* The autoidle bit is active-low for this clock */
#define AUTOIDLE_LOW		0x1

static LIST_HEAD(autoidle_clks);

/*
 * The autoidle callbacks perform non-atomic read-modify-write register
 * accesses, so use a single lock to serialize autoidle handling for all
 * clocks.
 */
static DEFINE_SPINLOCK(autoidle_spinlock);

static int _omap2_clk_deny_idle(struct clk_hw_omap *clk)
{
	if (clk->ops && clk->ops->deny_idle) {
		unsigned long irqflags;

		spin_lock_irqsave(&autoidle_spinlock, irqflags);
		clk->autoidle_count++;
		if (clk->autoidle_count == 1)
			clk->ops->deny_idle(clk);

		spin_unlock_irqrestore(&autoidle_spinlock, irqflags);
	}
	return 0;
}

static int _omap2_clk_allow_idle(struct clk_hw_omap *clk)
{
	if (clk->ops && clk->ops->allow_idle) {
		unsigned long irqflags;

		spin_lock_irqsave(&autoidle_spinlock, irqflags);
		clk->autoidle_count--;
		if (clk->autoidle_count == 0)
			clk->ops->allow_idle(clk);

		spin_unlock_irqrestore(&autoidle_spinlock, irqflags);
	}
	return 0;
}
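
/*
 * Illustration of the refcounting above (not part of the driver logic):
 * several independent users may deny idle on the same clock; the hardware
 * deny_idle hook runs only on the 0 -> 1 transition and the allow_idle
 * hook only on the 1 -> 0 transition, so the calls nest safely:
 *
 *	_omap2_clk_deny_idle(c);	// count 0 -> 1, ops->deny_idle(c)
 *	_omap2_clk_deny_idle(c);	// count 1 -> 2, no hardware access
 *	_omap2_clk_allow_idle(c);	// count 2 -> 1, no hardware access
 *	_omap2_clk_allow_idle(c);	// count 1 -> 0, ops->allow_idle(c)
 */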

/**
 * omap2_clk_deny_idle - disable autoidle on an OMAP clock
 * @clk: struct clk * to disable autoidle for
 *
 * Disable autoidle on an OMAP clock. Returns 0 on success, or -EINVAL
 * if @clk is NULL or is not an OMAP hardware clock.
 */
int omap2_clk_deny_idle(struct clk *clk)
{
	struct clk_hw *hw;

	if (!clk)
		return -EINVAL;

	hw = __clk_get_hw(clk);

	if (omap2_clk_is_hw_omap(hw)) {
		struct clk_hw_omap *c = to_clk_hw_omap(hw);

		return _omap2_clk_deny_idle(c);
	}

	return -EINVAL;
}

/**
 * omap2_clk_allow_idle - enable autoidle on an OMAP clock
 * @clk: struct clk * to enable autoidle for
 *
 * Enable autoidle on an OMAP clock. Returns 0 on success, or -EINVAL
 * if @clk is NULL or is not an OMAP hardware clock.
 */
int omap2_clk_allow_idle(struct clk *clk)
{
	struct clk_hw *hw;

	if (!clk)
		return -EINVAL;

	hw = __clk_get_hw(clk);

	if (omap2_clk_is_hw_omap(hw)) {
		struct clk_hw_omap *c = to_clk_hw_omap(hw);

		return _omap2_clk_allow_idle(c);
	}

	return -EINVAL;
}
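
/*
 * Sketch of a typical caller of the two functions above (hypothetical
 * driver code, not part of this file; the clock name "hyp_ick" is an
 * assumption for illustration only): a driver that must keep its
 * interface clock running across a register access sequence pairs the
 * deny/allow calls around that sequence.
 *
 *	struct clk *ick = clk_get(dev, "hyp_ick");
 *
 *	omap2_clk_deny_idle(ick);	// keep the clock from autoidling
 *	... latency-sensitive register accesses ...
 *	omap2_clk_allow_idle(ick);	// let hardware autoidle resume
 */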

static void _allow_autoidle(struct clk_ti_autoidle *clk)
{
	u32 val;

	val = ti_clk_ll_ops->clk_readl(&clk->reg);

	if (clk->flags & AUTOIDLE_LOW)
		val &= ~(1 << clk->shift);
	else
		val |= (1 << clk->shift);

	ti_clk_ll_ops->clk_writel(val, &clk->reg);
}

static void _deny_autoidle(struct clk_ti_autoidle *clk)
{
	u32 val;

	val = ti_clk_ll_ops->clk_readl(&clk->reg);

	if (clk->flags & AUTOIDLE_LOW)
		val |= (1 << clk->shift);
	else
		val &= ~(1 << clk->shift);

	ti_clk_ll_ops->clk_writel(val, &clk->reg);
}
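
/*
 * Worked example for the two helpers above (register values made up for
 * illustration): with shift = 8 and an initial register value of 0x0, an
 * active-high autoidle bit (flags without AUTOIDLE_LOW) is set to give
 * 0x100 by _allow_autoidle() and cleared again by _deny_autoidle(). With
 * AUTOIDLE_LOW set the polarity inverts: _allow_autoidle() clears bit 8
 * and _deny_autoidle() sets it.
 */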

/**
 * _clk_generic_allow_autoidle_all - enable autoidle for all clocks
 *
 * Enables hardware autoidle for all registered DT clocks that have
 * the feature.
 */
static void _clk_generic_allow_autoidle_all(void)
{
	struct clk_ti_autoidle *c;

	list_for_each_entry(c, &autoidle_clks, node)
		_allow_autoidle(c);
}

/**
 * _clk_generic_deny_autoidle_all - disable autoidle for all clocks
 *
 * Disables hardware autoidle for all registered DT clocks that have
 * the feature.
 */
static void _clk_generic_deny_autoidle_all(void)
{
	struct clk_ti_autoidle *c;

	list_for_each_entry(c, &autoidle_clks, node)
		_deny_autoidle(c);
}

/**
 * of_ti_clk_autoidle_setup - sets up hardware autoidle for a clock
 * @node: pointer to the clock device node
 *
 * Checks whether a clock has hardware autoidle support (i.e. whether the
 * 'ti,autoidle-shift' property is present in the device tree node) and,
 * if so, sets up the hardware autoidle feature for the clock and adds it
 * to the autoidle list for later processing. Returns 0 on success,
 * negative error value on failure.
 */
int __init of_ti_clk_autoidle_setup(struct device_node *node)
{
	u32 shift;
	struct clk_ti_autoidle *clk;
	int ret;

	/* Check if this clock has autoidle support or not */
	if (of_property_read_u32(node, "ti,autoidle-shift", &shift))
		return 0;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);

	if (!clk)
		return -ENOMEM;

	clk->shift = shift;
	clk->name = node->name;
	ret = ti_clk_get_reg_addr(node, 0, &clk->reg);
	if (ret) {
		kfree(clk);
		return ret;
	}

	if (of_property_read_bool(node, "ti,invert-autoidle-bit"))
		clk->flags |= AUTOIDLE_LOW;

	list_add(&clk->node, &autoidle_clks);

	return 0;
}
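
/*
 * Example of the device tree properties consumed above (illustrative
 * fragment only; the node label, parent, register offset and shift value
 * are assumptions, not taken from a real board file):
 *
 *	hyp_div_ck: clock@528 {
 *		#clock-cells = <0>;
 *		compatible = "ti,divider-clock";
 *		clocks = <&parent_ck>;
 *		reg = <0x0528>;
 *		ti,autoidle-shift = <8>;
 *		ti,invert-autoidle-bit;
 *	};
 */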

/**
 * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that
 * support it
 *
 * Enable clock autoidle on all OMAP clocks that have allow_idle
 * function pointers associated with them. This function is intended
 * to be temporary until support for this is added to the common clock
 * code. Returns 0 on success, or an error value if iterating over the
 * OMAP hardware clocks fails.
 */
int omap2_clk_enable_autoidle_all(void)
{
	int ret;

	ret = omap2_clk_for_each(_omap2_clk_allow_idle);
	if (ret)
		return ret;

	_clk_generic_allow_autoidle_all();

	return 0;
}

/**
 * omap2_clk_disable_autoidle_all - disable autoidle on all OMAP clocks that
 * support it
 *
 * Disable clock autoidle on all OMAP clocks that have deny_idle
 * function pointers associated with them. This function is intended
 * to be temporary until support for this is added to the common clock
 * code. Returns 0 on success, or an error value if iterating over the
 * OMAP hardware clocks fails.
 */
int omap2_clk_disable_autoidle_all(void)
{
	int ret;

	ret = omap2_clk_for_each(_omap2_clk_deny_idle);
	if (ret)
		return ret;

	_clk_generic_deny_autoidle_all();

	return 0;
}
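
/*
 * One possible pairing of the two *_all() helpers above (hypothetical
 * platform power-management code, not part of this file): autoidle is
 * disabled while clocks must stay under explicit software control across
 * a low-power transition, and re-enabled afterwards.
 *
 *	omap2_clk_disable_autoidle_all();
 *	... enter and leave the low-power state ...
 *	omap2_clk_enable_autoidle_all();
 */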