/*
 * Copyright (C) 2013 Broadcom Corporation
 * Copyright 2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "clk-kona.h"

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/clk-provider.h>

/*
 * "Policies" affect the frequencies of bus clocks provided by a
 * CCU. (I believe these policies are named "Deep Sleep", "Economy",
 * "Normal", and "Turbo".) A lower policy number has lower power
 * consumption, and policy 2 is the default.
 */
#define CCU_POLICY_COUNT	4

#define CCU_ACCESS_PASSWORD	0xA5A500
#define CLK_GATE_DELAY_LOOP	2000

/* Bitfield operations */

/* Produces a mask of set bits covering a range of a 32-bit value */
static inline u32 bitfield_mask(u32 shift, u32 width)
{
	return ((1 << width) - 1) << shift;
}

/* Extract the value of a bitfield found within a given register value */
static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width)
{
	return (reg_val & bitfield_mask(shift, width)) >> shift;
}

/* Replace the value of a bitfield found within a given register value */
static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
{
	u32 mask = bitfield_mask(shift, width);

	return (reg_val & ~mask) | (val << shift);
}
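
/*
 * Worked example (illustrative values, not from real hardware):
 * bitfield_replace(0xffff0000, 4, 8, 0xab) builds mask 0x00000ff0,
 * clears those bits (leaving 0xffff0000), and ORs in 0xab << 4,
 * yielding 0xffff0ab0. Note that callers must ensure val fits in
 * width bits; the value is not masked before being shifted in.
 */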

/* Divider and scaling helpers */

/* Convert a divider into the scaled divisor value it represents. */
static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
{
	return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
}
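
/*
 * Worked example (illustrative): with frac_width = 4 the scale
 * factor is 16, so a register value of 8 represents the scaled
 * divisor 8 + 16 = 24, i.e. an effective divide-by of 24/16 = 1.5.
 */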

/*
 * Build a scaled divider value as close as possible to the
 * given whole part (div_value) and fractional part (expressed
 * in billionths).
 */
u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
{
	u64 combined;

	BUG_ON(!div_value);
	BUG_ON(billionths >= BILLION);

	combined = (u64)div_value * BILLION + billionths;
	combined <<= div->u.s.frac_width;

	return DIV_ROUND_CLOSEST_ULL(combined, BILLION);
}
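
/*
 * Continuing the illustrative example: requesting 1.5 (div_value = 1,
 * billionths = 500000000) with frac_width = 4 gives
 * combined = 1500000000 << 4 = 24000000000, and dividing by a
 * billion (rounded) yields the scaled divisor 24, as above.
 */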

/* The scaled minimum divisor representable by a divider */
static inline u64
scaled_div_min(struct bcm_clk_div *div)
{
	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	return scaled_div_value(div, 0);
}

/* The scaled maximum divisor representable by a divider */
u64 scaled_div_max(struct bcm_clk_div *div)
{
	u32 reg_div;

	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	reg_div = ((u32)1 << div->u.s.width) - 1;

	return scaled_div_value(div, reg_div);
}

/*
 * Convert a scaled divisor into its divider representation as
 * stored in a divider register field.
 */
static inline u32
divider(struct bcm_clk_div *div, u64 scaled_div)
{
	BUG_ON(scaled_div < scaled_div_min(div));
	BUG_ON(scaled_div > scaled_div_max(div));

	return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
}
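
/*
 * This is the inverse of scaled_div_value(): with frac_width = 4,
 * the scaled divisor 24 maps back to the register value 24 - 16 = 8.
 */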

/* Return a rate scaled for use when dividing by a scaled divisor. */
static inline u64
scale_rate(struct bcm_clk_div *div, u32 rate)
{
	if (divider_is_fixed(div))
		return (u64)rate;

	return (u64)rate << div->u.s.frac_width;
}
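
/*
 * Illustrative arithmetic: scaling 100000000 (100 MHz) by
 * frac_width = 4 gives 1600000000; a later division by the scaled
 * divisor 24 then yields 66666667, i.e. 100 MHz / 1.5 (rounded).
 */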

/* CCU access */

/* Read a 32-bit register value from a CCU's address space. */
static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset)
{
	return readl(ccu->base + reg_offset);
}

/* Write a 32-bit register value into a CCU's address space. */
static inline void
__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val)
{
	writel(reg_val, ccu->base + reg_offset);
}

static inline unsigned long ccu_lock(struct ccu_data *ccu)
{
	unsigned long flags;

	spin_lock_irqsave(&ccu->lock, flags);

	return flags;
}

static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags)
{
	spin_unlock_irqrestore(&ccu->lock, flags);
}

/*
 * Enable/disable write access to CCU protected registers. The
 * WR_ACCESS register for all CCUs is at offset 0.
 */
static inline void __ccu_write_enable(struct ccu_data *ccu)
{
	if (ccu->write_enabled) {
		pr_err("%s: access already enabled for %s\n", __func__,
			ccu->name);
		return;
	}
	ccu->write_enabled = true;
	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1);
}

static inline void __ccu_write_disable(struct ccu_data *ccu)
{
	if (!ccu->write_enabled) {
		pr_err("%s: access wasn't enabled for %s\n", __func__,
			ccu->name);
		return;
	}

	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD);
	ccu->write_enabled = false;
}
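
/*
 * Typical call pattern for writers of protected registers (as used
 * by clk_gate() and divider_write() below):
 *
 *	flags = ccu_lock(ccu);
 *	__ccu_write_enable(ccu);
 *	...read/modify/write CCU registers...
 *	__ccu_write_disable(ccu);
 *	ccu_unlock(ccu, flags);
 */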

/*
 * Poll a register in a CCU's address space, returning when the
 * specified bit in that register's value is set (or clear). Delay
 * a microsecond after each read of the register. Returns true if
 * successful, or false if we gave up trying (after CLK_GATE_DELAY_LOOP
 * reads, i.e. about 2 milliseconds).
 *
 * Caller must ensure the CCU lock is held.
 */
static inline bool
__ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want)
{
	unsigned int tries;
	u32 bit_mask = 1 << bit;

	for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) {
		u32 val;
		bool bit_val;

		val = __ccu_read(ccu, reg_offset);
		bit_val = (val & bit_mask) != 0;
		if (bit_val == want)
			return true;
		udelay(1);
	}
	pr_warn("%s: %s/0x%04x bit %u was never %s\n", __func__,
		ccu->name, reg_offset, bit, want ? "set" : "clear");

	return false;
}

/* Policy operations */

static bool __ccu_policy_engine_start(struct ccu_data *ccu, bool sync)
{
	struct bcm_policy_ctl *control = &ccu->policy.control;
	u32 offset;
	u32 go_bit;
	u32 mask;
	bool ret;

	/* If we don't need to control policy for this CCU, we're done. */
	if (!policy_ctl_exists(control))
		return true;

	offset = control->offset;
	go_bit = control->go_bit;

	/* Ensure we're not busy before we start */
	ret = __ccu_wait_bit(ccu, offset, go_bit, false);
	if (!ret) {
		pr_err("%s: ccu %s policy engine wouldn't go idle\n",
			__func__, ccu->name);
		return false;
	}

	/*
	 * If it's a synchronous request, we'll wait for the voltage
	 * and frequency of the active load to stabilize before
	 * returning. To do this we select the active load by
	 * setting the ATL bit.
	 *
	 * An asynchronous request instead ramps the voltage in the
	 * background, and when that process stabilizes, the target
	 * load is copied to the active load and the CCU frequency
	 * is switched. We do this by selecting the target load
	 * (ATL bit clear) and setting the request auto-copy (AC bit
	 * set).
	 *
	 * Note, we do NOT read-modify-write this register.
	 */
	mask = (u32)1 << go_bit;
	if (sync)
		mask |= 1 << control->atl_bit;
	else
		mask |= 1 << control->ac_bit;
	__ccu_write(ccu, offset, mask);

	/* Wait for indication that operation is complete. */
	ret = __ccu_wait_bit(ccu, offset, go_bit, false);
	if (!ret)
		pr_err("%s: ccu %s policy engine request never completed\n",
			__func__, ccu->name);

	return ret;
}

static bool __ccu_policy_engine_stop(struct ccu_data *ccu)
{
	struct bcm_lvm_en *enable = &ccu->policy.enable;
	u32 offset;
	u32 enable_bit;
	bool ret;

	/* If we don't need to control policy for this CCU, we're done. */
	if (!policy_lvm_en_exists(enable))
		return true;

	/* Ensure we're not busy before we start */
	offset = enable->offset;
	enable_bit = enable->bit;
	ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
	if (!ret) {
		pr_err("%s: ccu %s policy engine wouldn't go idle\n",
			__func__, ccu->name);
		return false;
	}

	/* Now set the bit to stop the engine (NO read-modify-write) */
	__ccu_write(ccu, offset, (u32)1 << enable_bit);

	/* Wait for indication that it has stopped. */
	ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
	if (!ret)
		pr_err("%s: ccu %s policy engine never stopped\n",
			__func__, ccu->name);

	return ret;
}

/*
 * A CCU has four operating conditions ("policies"), and some clocks
 * can be disabled or enabled based on which policy is currently in
 * effect. Such clocks have a bit in a "policy mask" register for
 * each policy indicating whether the clock is enabled for that
 * policy or not. The bit position for a clock is the same for all
 * four registers, and the 32-bit registers are at consecutive
 * addresses.
 */
static bool policy_init(struct ccu_data *ccu, struct bcm_clk_policy *policy)
{
	u32 offset;
	u32 mask;
	int i;
	bool ret;

	if (!policy_exists(policy))
		return true;

	/*
	 * We need to stop the CCU policy engine to allow update
	 * of our policy bits.
	 */
	if (!__ccu_policy_engine_stop(ccu)) {
		pr_err("%s: unable to stop CCU %s policy engine\n",
			__func__, ccu->name);
		return false;
	}

	/*
	 * For now, if a clock defines its policy bit we just mark
	 * it "enabled" for all four policies.
	 */
	offset = policy->offset;
	mask = (u32)1 << policy->bit;
	for (i = 0; i < CCU_POLICY_COUNT; i++) {
		u32 reg_val;

		reg_val = __ccu_read(ccu, offset);
		reg_val |= mask;
		__ccu_write(ccu, offset, reg_val);
		offset += sizeof(u32);
	}

	/* We're done updating; fire up the policy engine again. */
	ret = __ccu_policy_engine_start(ccu, true);
	if (!ret)
		pr_err("%s: unable to restart CCU %s policy engine\n",
			__func__, ccu->name);

	return ret;
}

/* Gate operations */

/* Determine whether a clock is gated. CCU lock must be held. */
static bool
__is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	u32 bit_mask;
	u32 reg_val;

	/* If there is no gate we can assume it's enabled. */
	if (!gate_exists(gate))
		return true;

	bit_mask = 1 << gate->status_bit;
	reg_val = __ccu_read(ccu, gate->offset);

	return (reg_val & bit_mask) != 0;
}

/* Determine whether a clock is gated. */
static bool
is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	unsigned long flags;
	bool ret;

	/* Avoid taking the lock if we can */
	if (!gate_exists(gate))
		return true;

	flags = ccu_lock(ccu);
	ret = __is_clk_gate_enabled(ccu, gate);
	ccu_unlock(ccu, flags);

	return ret;
}

/*
 * Commit our desired gate state to the hardware.
 * Returns true if successful, false otherwise.
 */
static bool
__gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	u32 reg_val;
	u32 mask;
	bool enabled = false;

	BUG_ON(!gate_exists(gate));
	if (!gate_is_sw_controllable(gate))
		return true;		/* Nothing we can change */

	reg_val = __ccu_read(ccu, gate->offset);

	/* For a hardware/software gate, set which is in control */
	if (gate_is_hw_controllable(gate)) {
		mask = (u32)1 << gate->hw_sw_sel_bit;
		if (gate_is_sw_managed(gate))
			reg_val |= mask;
		else
			reg_val &= ~mask;
	}

	/*
	 * If software is in control, enable or disable the gate.
	 * If hardware is, clear the enabled bit for good measure.
	 * If a software controlled gate can't be disabled, we're
	 * required to write a 0 into the enable bit (but the gate
	 * will be enabled).
	 */
	mask = (u32)1 << gate->en_bit;
	if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) &&
	    !gate_is_no_disable(gate))
		reg_val |= mask;
	else
		reg_val &= ~mask;

	__ccu_write(ccu, gate->offset, reg_val);

	/* For a hardware controlled gate, we're done */
	if (!gate_is_sw_managed(gate))
		return true;

	/* Otherwise wait for the gate to be in desired state */
	return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled);
}

/*
 * Initialize a gate. Our desired state (hardware/software select,
 * and if software, its enable state) is committed to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	if (!gate_exists(gate))
		return true;
	return __gate_commit(ccu, gate);
}

/*
 * Set a gate to enabled or disabled state. Does nothing if the
 * gate is not currently under software control, or if it is already
 * in the requested state. Returns true if successful, false
 * otherwise. CCU lock must be held.
 */
static bool
__clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable)
{
	bool ret;

	if (!gate_exists(gate) || !gate_is_sw_managed(gate))
		return true;	/* Nothing to do */

	if (!enable && gate_is_no_disable(gate)) {
		pr_warn("%s: invalid gate disable request (ignoring)\n",
			__func__);
		return true;
	}

	if (enable == gate_is_enabled(gate))
		return true;	/* No change */

	gate_flip_enabled(gate);
	ret = __gate_commit(ccu, gate);
	if (!ret)
		gate_flip_enabled(gate);	/* Revert the change */

	return ret;
}

/* Enable or disable a gate. Returns 0 if successful, -EIO otherwise */
static int clk_gate(struct ccu_data *ccu, const char *name,
		struct bcm_clk_gate *gate, bool enable)
{
	unsigned long flags;
	bool success;

	/*
	 * Avoid taking the lock if we can. We quietly ignore
	 * requests to change state that don't make sense.
	 */
	if (!gate_exists(gate) || !gate_is_sw_managed(gate))
		return 0;
	if (!enable && gate_is_no_disable(gate))
		return 0;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	success = __clk_gate(ccu, gate, enable);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (success)
		return 0;

	pr_err("%s: failed to %s gate for %s\n", __func__,
		enable ? "enable" : "disable", name);

	return -EIO;
}

/* Hysteresis operations */

/*
 * If a clock gate requires a turn-off delay it will have
 * "hysteresis" register bits defined. The first, if set, enables
 * the delay; and if enabled, the second bit determines whether the
 * delay is "low" or "high" (1 means high). For now, if it's
 * defined for a clock, we set it.
 */
static bool hyst_init(struct ccu_data *ccu, struct bcm_clk_hyst *hyst)
{
	u32 offset;
	u32 reg_val;
	u32 mask;

	if (!hyst_exists(hyst))
		return true;

	offset = hyst->offset;
	mask = (u32)1 << hyst->en_bit;
	mask |= (u32)1 << hyst->val_bit;

	reg_val = __ccu_read(ccu, offset);
	reg_val |= mask;
	__ccu_write(ccu, offset, reg_val);

	return true;
}

/* Trigger operations */

/*
 * Caller must ensure CCU lock is held and access is enabled.
 * Returns true if successful, false otherwise.
 */
static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig)
{
	/* Trigger the clock and wait for it to finish */
	__ccu_write(ccu, trig->offset, 1 << trig->bit);

	return __ccu_wait_bit(ccu, trig->offset, trig->bit, false);
}

/* Divider operations */

/* Read a divider value and return the scaled divisor it represents. */
static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
{
	unsigned long flags;
	u32 reg_val;
	u32 reg_div;

	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	flags = ccu_lock(ccu);
	reg_val = __ccu_read(ccu, div->u.s.offset);
	ccu_unlock(ccu, flags);

	/* Extract the full divider field from the register value */
	reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);

	/* Return the scaled divisor value it represents */
	return scaled_div_value(div, reg_div);
}

/*
 * Convert a divider's scaled divisor value into its recorded form
 * and commit it into the hardware divider register.
 *
 * Returns 0 on success. Returns -ENXIO if gating failed, and
 * -EIO if a trigger failed.
 */
static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
	bool enabled;
	u32 reg_div;
	u32 reg_val;
	int ret = 0;

	BUG_ON(divider_is_fixed(div));

	/*
	 * If we're just initializing the divider, and no initial
	 * state was defined in the device tree, we just find out
	 * what its current value is rather than updating it.
	 */
	if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
		reg_val = __ccu_read(ccu, div->u.s.offset);
		reg_div = bitfield_extract(reg_val, div->u.s.shift,
					   div->u.s.width);
		div->u.s.scaled_div = scaled_div_value(div, reg_div);

		return 0;
	}

	/* Convert the scaled divisor to the value we need to record */
	reg_div = divider(div, div->u.s.scaled_div);

	/* Clock needs to be enabled before changing the rate */
	enabled = __is_clk_gate_enabled(ccu, gate);
	if (!enabled && !__clk_gate(ccu, gate, true)) {
		ret = -ENXIO;
		goto out;
	}

	/* Replace the divider value and record the result */
	reg_val = __ccu_read(ccu, div->u.s.offset);
	reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
				   reg_div);
	__ccu_write(ccu, div->u.s.offset, reg_val);

	/* If the trigger fails we still want to disable the gate */
	if (!__clk_trigger(ccu, trig))
		ret = -EIO;

	/* Disable the clock again if it was disabled to begin with */
	if (!enabled && !__clk_gate(ccu, gate, false))
		ret = ret ? ret : -ENXIO;	/* return first error */
out:
	return ret;
}

/*
 * Initialize a divider by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
	if (!divider_exists(div) || divider_is_fixed(div))
		return true;
	return !__div_commit(ccu, gate, div, trig);
}

static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig,
			u64 scaled_div)
{
	unsigned long flags;
	u64 previous;
	int ret;

	BUG_ON(divider_is_fixed(div));

	previous = div->u.s.scaled_div;
	if (previous == scaled_div)
		return 0;	/* No change */

	div->u.s.scaled_div = scaled_div;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	ret = __div_commit(ccu, gate, div, trig);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (ret)
		div->u.s.scaled_div = previous;	/* Revert the change */

	return ret;
}

/* Common clock rate helpers */

/*
 * Implement the common clock framework recalc_rate method, taking
 * into account a divider and an optional pre-divider. The
 * pre-divider register pointer may be NULL.
 */
static unsigned long clk_recalc_rate(struct ccu_data *ccu,
			struct bcm_clk_div *div, struct bcm_clk_div *pre_div,
			unsigned long parent_rate)
{
	u64 scaled_parent_rate;
	u64 scaled_div;
	u64 result;

	if (!divider_exists(div))
		return parent_rate;

	if (parent_rate > (unsigned long)LONG_MAX)
		return 0;	/* actually this would be a caller bug */

	/*
	 * If there is a pre-divider, divide the scaled parent rate
	 * by the pre-divider value first. In this case--to improve
	 * accuracy--scale the parent rate by *both* the pre-divider
	 * value and the divider before actually computing the
	 * result of the pre-divider.
	 *
	 * If there's only one divider, just scale the parent rate.
	 */
	if (pre_div && divider_exists(pre_div)) {
		u64 scaled_rate;

		scaled_rate = scale_rate(pre_div, parent_rate);
		scaled_rate = scale_rate(div, scaled_rate);
		scaled_div = divider_read_scaled(ccu, pre_div);
		scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
							scaled_div);
	} else {
		scaled_parent_rate = scale_rate(div, parent_rate);
	}

	/*
	 * Get the scaled divisor value, and divide the scaled
	 * parent rate by that to determine this clock's resulting
	 * rate.
	 */
	scaled_div = divider_read_scaled(ccu, div);
	result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, scaled_div);

	return (unsigned long)result;
}
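
/*
 * Worked example (all values illustrative): with a 96 MHz parent,
 * frac_width = 4 on both dividers, a pre-divider reading of scaled
 * divisor 32 (divide by 2) and a divider reading of 24 (divide by
 * 1.5): 96000000 is scaled twice (<< 8) to 24576000000, divided by
 * 32 to give 768000000, then divided by 24 to give 32000000, i.e.
 * 96 MHz / 2 / 1.5 = 32 MHz.
 */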

/*
 * Compute the output rate produced when a given parent rate is fed
 * into two dividers. The pre-divider can be NULL, and even if it's
 * non-null it may be nonexistent. The (downstream) divider must
 * exist; callers are expected to check this before calling.
 *
 * If scaled_div is non-null, it is used to return the scaled divisor
 * value used by the (downstream) divider to produce that rate.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) struct bcm_clk_div *pre_div,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) unsigned long rate, unsigned long parent_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) u64 *scaled_div)
{
	u64 scaled_parent_rate;
	u64 min_scaled_div;
	u64 max_scaled_div;
	u64 best_scaled_div;
	u64 result;

	BUG_ON(!divider_exists(div));
	BUG_ON(!rate);
	BUG_ON(parent_rate > (u64)LONG_MAX);

	/*
	 * If there is a pre-divider, divide the scaled parent rate
	 * by the pre-divider value first.  In this case, to improve
	 * accuracy, scale the parent rate by *both* the pre-divider
	 * value and the divider before actually applying the
	 * pre-divider.
	 *
	 * If there's only one divider, just scale the parent rate.
	 *
	 * For simplicity we treat the pre-divider as fixed (for now).
	 */
	if (divider_exists(pre_div)) {
		u64 scaled_rate;
		u64 scaled_pre_div;

		scaled_rate = scale_rate(pre_div, parent_rate);
		scaled_rate = scale_rate(div, scaled_rate);
		scaled_pre_div = divider_read_scaled(ccu, pre_div);
		scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
							scaled_pre_div);
	} else {
		scaled_parent_rate = scale_rate(div, parent_rate);
	}

	/*
	 * Compute the best possible divider and ensure it is in
	 * range.  A fixed divider can't be changed, so just report
	 * the best we can do.
	 */
	if (!divider_is_fixed(div)) {
		best_scaled_div = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate,
							rate);
		min_scaled_div = scaled_div_min(div);
		max_scaled_div = scaled_div_max(div);
		if (best_scaled_div > max_scaled_div)
			best_scaled_div = max_scaled_div;
		else if (best_scaled_div < min_scaled_div)
			best_scaled_div = min_scaled_div;
	} else {
		best_scaled_div = divider_read_scaled(ccu, div);
	}

	/* OK, figure out the resulting rate */
	result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, best_scaled_div);

	if (scaled_div)
		*scaled_div = best_scaled_div;

	return (long)result;
}
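
/*
 * Worked example (hypothetical values, for illustration only):
 * assume a variable divider with three fraction bits, so scaled
 * values are 8 times the effective divisor, and no pre-divider.
 * With parent_rate = 100000000 (100 MHz) and rate = 32000000:
 *
 *	scaled_parent_rate = 100000000 * 8 = 800000000
 *	best_scaled_div = DIV_ROUND_CLOSEST_ULL(800000000, 32000000) = 25
 *
 * That is an effective divisor of 25 / 8 = 3.125, and the result
 * is DIV_ROUND_CLOSEST_ULL(800000000, 25) = 32000000, an exact
 * match (assuming 25 lies within the divider's min/max range).
 */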

/* Common clock parent helpers */

/*
 * For a given parent selector (register field) value, find the
 * index into a selector's parent_sel array that contains it.
 * Returns the index, or BAD_CLK_INDEX if it's not found.
 */
static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel)
{
	u8 i;

	BUG_ON(sel->parent_count > (u32)U8_MAX);
	for (i = 0; i < sel->parent_count; i++)
		if (sel->parent_sel[i] == parent_sel)
			return i;
	return BAD_CLK_INDEX;
}
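
/*
 * Example (hypothetical selector): if parent_sel[] is { 0, 2, 3 },
 * a register field value of 2 maps to index 1, while a field value
 * of 1 appears nowhere in the array and yields BAD_CLK_INDEX.
 */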

/*
 * Fetch the current value of the selector, and translate that into
 * its corresponding index in the parent array we registered with
 * the clock framework.
 *
 * Returns the parent array index that corresponds to the value found,
 * or BAD_CLK_INDEX if the found value is out of range.
 */
static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel)
{
	unsigned long flags;
	u32 reg_val;
	u32 parent_sel;
	u8 index;

	/* If there's no selector, there's only one parent */
	if (!selector_exists(sel))
		return 0;

	/* Get the value in the selector register */
	flags = ccu_lock(ccu);
	reg_val = __ccu_read(ccu, sel->offset);
	ccu_unlock(ccu, flags);

	parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);

	/* Look up that selector's parent array index and return it */
	index = parent_index(sel, parent_sel);
	if (index == BAD_CLK_INDEX)
		pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n",
			__func__, parent_sel, ccu->name, sel->offset);

	return index;
}

/*
 * Commit our desired selector value to the hardware.
 *
 * Returns 0 on success.  Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int
__sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
		struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
	u32 parent_sel;
	u32 reg_val;
	bool enabled;
	int ret = 0;

	BUG_ON(!selector_exists(sel));

	/*
	 * If we're just initializing the selector, and no initial
	 * state was defined in the device tree, we just find out
	 * what its current value is rather than updating it.
	 */
	if (sel->clk_index == BAD_CLK_INDEX) {
		u8 index;

		reg_val = __ccu_read(ccu, sel->offset);
		parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
		index = parent_index(sel, parent_sel);
		if (index == BAD_CLK_INDEX)
			return -EINVAL;
		sel->clk_index = index;

		return 0;
	}

	BUG_ON((u32)sel->clk_index >= sel->parent_count);
	parent_sel = sel->parent_sel[sel->clk_index];

	/* Clock needs to be enabled before changing the parent */
	enabled = __is_clk_gate_enabled(ccu, gate);
	if (!enabled && !__clk_gate(ccu, gate, true))
		return -ENXIO;

	/* Replace the selector value and record the result */
	reg_val = __ccu_read(ccu, sel->offset);
	reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel);
	__ccu_write(ccu, sel->offset, reg_val);

	/* If the trigger fails we still want to disable the gate */
	if (!__clk_trigger(ccu, trig))
		ret = -EIO;

	/* Disable the clock again if it was disabled to begin with */
	if (!enabled && !__clk_gate(ccu, gate, false))
		ret = ret ? ret : -ENXIO;	/* return first error */

	return ret;
}
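
/*
 * To illustrate the selector update in __sel_commit() (hypothetical
 * field position): with sel->shift = 8 and sel->width = 2, the
 * selector occupies register bits 9:8.  If the register reads
 * 0x00000100 (field value 1) and parent_sel is 2, bitfield_replace()
 * produces 0x00000200, which is then written back and triggered.
 */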

/*
 * Initialize a selector by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
		struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
	if (!selector_exists(sel))
		return true;
	return !__sel_commit(ccu, gate, sel, trig);
}

/*
 * Write a new value into a selector register to switch to a
 * different parent clock.  Returns 0 on success, or an error code
 * (from __sel_commit()) otherwise.
 */
static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig,
			u8 index)
{
	unsigned long flags;
	u8 previous;
	int ret;

	previous = sel->clk_index;
	if (previous == index)
		return 0;	/* No change */

	sel->clk_index = index;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	ret = __sel_commit(ccu, gate, sel, trig);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (ret)
		sel->clk_index = previous;	/* Revert the change */

	return ret;
}
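
/*
 * Note that selector_write() updates the cached sel->clk_index
 * optimistically, before committing to hardware; if __sel_commit()
 * fails, the previous index is restored so the cached value stays
 * in sync with what the hardware is actually using.
 */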

/* Clock operations */

static int kona_peri_clk_enable(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	return clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, true);
}

static void kona_peri_clk_disable(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	(void)clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, false);
}

static int kona_peri_clk_is_enabled(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
}

static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
			unsigned long parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;

	return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
				parent_rate);
}

static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_div *div = &bcm_clk->u.peri->div;

	if (!divider_exists(div))
		return clk_hw_get_rate(hw);

	/* Quietly avoid a zero rate */
	return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
				rate ? rate : 1, *parent_rate, NULL);
}

static int kona_peri_clk_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct clk_hw *current_parent;
	unsigned long parent_rate;
	unsigned long best_delta;
	unsigned long best_rate;
	u32 parent_count;
	long rate;
	u32 which;

	/*
	 * If there is no other parent to choose, use the current one.
	 * Note:  We don't honor (or use) CLK_SET_RATE_NO_REPARENT.
	 */
	WARN_ON_ONCE(bcm_clk->init_data.flags & CLK_SET_RATE_NO_REPARENT);
	parent_count = (u32)bcm_clk->init_data.num_parents;
	if (parent_count < 2) {
		rate = kona_peri_clk_round_rate(hw, req->rate,
						&req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
		return 0;
	}

	/* Unless we can do better, stick with the current parent */
	current_parent = clk_hw_get_parent(hw);
	parent_rate = clk_hw_get_rate(current_parent);
	best_rate = kona_peri_clk_round_rate(hw, req->rate, &parent_rate);
	best_delta = abs(best_rate - req->rate);

	/* Check whether any other parent clock can produce a better result */
	for (which = 0; which < parent_count; which++) {
		struct clk_hw *parent = clk_hw_get_parent_by_index(hw, which);
		unsigned long delta;
		unsigned long other_rate;

		BUG_ON(!parent);
		if (parent == current_parent)
			continue;

		/* We don't support CLK_SET_RATE_PARENT */
		parent_rate = clk_hw_get_rate(parent);
		other_rate = kona_peri_clk_round_rate(hw, req->rate,
							&parent_rate);
		delta = abs(other_rate - req->rate);
		if (delta < best_delta) {
			best_delta = delta;
			best_rate = other_rate;
			req->best_parent_hw = parent;
			req->best_parent_rate = parent_rate;
		}
	}

	req->rate = best_rate;
	return 0;
}
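
/*
 * Worked example (hypothetical rates): suppose a requested rate of
 * 48 MHz, with the current parent at 26 MHz and one alternative
 * parent at 312 MHz.  Rounding against the 26 MHz parent can do no
 * better than 26 MHz (delta 22 MHz), while 312 MHz divided by an
 * effective divisor of 6.5 hits 48 MHz exactly (delta 0), so the
 * request is rewritten to select the 312 MHz parent.
 */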

static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	struct bcm_clk_sel *sel = &data->sel;
	struct bcm_clk_trig *trig;
	int ret;

	BUG_ON(index >= sel->parent_count);

	/* If there's only one parent we don't require a selector */
	if (!selector_exists(sel))
		return 0;

	/*
	 * The regular trigger is used by default, but if there's a
	 * pre-trigger we want to use that instead.
	 */
	trig = trigger_exists(&data->pre_trig) ? &data->pre_trig
					       : &data->trig;

	ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index);
	if (ret == -ENXIO) {
		pr_err("%s: gating failure for %s\n", __func__,
			bcm_clk->init_data.name);
		ret = -EIO;	/* Don't proliferate weird errors */
	} else if (ret == -EIO) {
		pr_err("%s: %strigger failed for %s\n", __func__,
			trig == &data->pre_trig ? "pre-" : "",
			bcm_clk->init_data.name);
	}

	return ret;
}

static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	u8 index;

	index = selector_read_index(bcm_clk->ccu, &data->sel);

	/* Not all callers would handle an out-of-range value gracefully */
	return index == BAD_CLK_INDEX ? 0 : index;
}

static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	struct bcm_clk_div *div = &data->div;
	u64 scaled_div = 0;
	int ret;

	if (parent_rate > (unsigned long)LONG_MAX)
		return -EINVAL;

	if (rate == clk_hw_get_rate(hw))
		return 0;

	if (!divider_exists(div))
		return rate == parent_rate ? 0 : -EINVAL;

	/*
	 * A fixed divider can't be changed.  (Nor can a fixed
	 * pre-divider be, but for now we never actually try to
	 * change that.)  Tolerate a request for a no-op change.
	 */
	if (divider_is_fixed(div))
		return rate == parent_rate ? 0 : -EINVAL;

	/*
	 * Get the scaled divisor value needed to achieve a clock
	 * rate as close as possible to what was requested, given
	 * the parent clock rate supplied.
	 */
	(void)round_rate(bcm_clk->ccu, div, &data->pre_div,
				rate ? rate : 1, parent_rate, &scaled_div);

	/*
	 * We aren't updating any pre-divider at this point, so
	 * we'll use the regular trigger.
	 */
	ret = divider_write(bcm_clk->ccu, &data->gate, div,
				&data->trig, scaled_div);
	if (ret == -ENXIO) {
		pr_err("%s: gating failure for %s\n", __func__,
			bcm_clk->init_data.name);
		ret = -EIO;	/* Don't proliferate weird errors */
	} else if (ret == -EIO) {
		pr_err("%s: trigger failed for %s\n", __func__,
			bcm_clk->init_data.name);
	}

	return ret;
}

struct clk_ops kona_peri_clk_ops = {
	.enable = kona_peri_clk_enable,
	.disable = kona_peri_clk_disable,
	.is_enabled = kona_peri_clk_is_enabled,
	.recalc_rate = kona_peri_clk_recalc_rate,
	.determine_rate = kona_peri_clk_determine_rate,
	.set_parent = kona_peri_clk_set_parent,
	.get_parent = kona_peri_clk_get_parent,
	.set_rate = kona_peri_clk_set_rate,
};
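
/*
 * These operations take effect once a kona_clk's init_data points
 * at them and the clock is registered with the common clock
 * framework.  A minimal sketch of that hookup (the real work is
 * done by the CCU setup code; the sequence below is illustrative):
 *
 *	bcm_clk->init_data.ops = &kona_peri_clk_ops;
 *	bcm_clk->hw.init = &bcm_clk->init_data;
 *	clk = clk_register(NULL, &bcm_clk->hw);
 */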

/* Put a peripheral clock into its initial state */
static bool __peri_clk_init(struct kona_clk *bcm_clk)
{
	struct ccu_data *ccu = bcm_clk->ccu;
	struct peri_clk_data *peri = bcm_clk->u.peri;
	const char *name = bcm_clk->init_data.name;
	struct bcm_clk_trig *trig;

	BUG_ON(bcm_clk->type != bcm_clk_peri);

	if (!policy_init(ccu, &peri->policy)) {
		pr_err("%s: error initializing policy for %s\n",
			__func__, name);
		return false;
	}
	if (!gate_init(ccu, &peri->gate)) {
		pr_err("%s: error initializing gate for %s\n", __func__, name);
		return false;
	}
	if (!hyst_init(ccu, &peri->hyst)) {
		pr_err("%s: error initializing hyst for %s\n", __func__, name);
		return false;
	}
	if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) {
		pr_err("%s: error initializing divider for %s\n", __func__,
			name);
		return false;
	}

	/*
	 * For the pre-divider and selector, the pre-trigger is used
	 * if it's present, otherwise we just use the regular trigger.
	 */
	trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig
					       : &peri->trig;

	if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) {
		pr_err("%s: error initializing pre-divider for %s\n", __func__,
			name);
		return false;
	}

	if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) {
		pr_err("%s: error initializing selector for %s\n", __func__,
			name);
		return false;
	}

	return true;
}
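
/*
 * Note the initialization order in __peri_clk_init(): policy, gate,
 * and hysteresis state are committed before the main divider, and
 * the pre-divider and selector are committed last, sharing the
 * pre-trigger when one exists.
 */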

static bool __kona_clk_init(struct kona_clk *bcm_clk)
{
	switch (bcm_clk->type) {
	case bcm_clk_peri:
		return __peri_clk_init(bcm_clk);
	default:
		BUG();
	}
	return false;
}

/* Set a CCU and all its clocks into their desired initial state */
bool __init kona_ccu_init(struct ccu_data *ccu)
{
	unsigned long flags;
	unsigned int which;
	struct kona_clk *kona_clks = ccu->kona_clks;
	bool success = true;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	for (which = 0; which < ccu->clk_num; which++) {
		struct kona_clk *bcm_clk = &kona_clks[which];

		if (!bcm_clk->ccu)
			continue;

		success &= __kona_clk_init(bcm_clk);
	}

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);
	return success;
}
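
/*
 * A caller might check the result like this (sketch only; the
 * actual call site lives in the CCU setup code):
 *
 *	if (!kona_ccu_init(ccu))
 *		pr_err("%s: failed to initialize CCU %s\n",
 *			__func__, ccu->name);
 */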