// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Atmel Corporation,
 *                    Nicolas Ferre <nicolas.ferre@atmel.com>
 *
 * Based on clk-programmable & clk-peripheral drivers by Boris BREZILLON.
 */

#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#include "pmc.h"

#define GENERATED_MAX_DIV	255

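/*
 * struct clk_generated - generated clock (gck) driven by the PMC
 * @hw:		clk_hw handle registered with the clock framework
 * @regmap:	PMC register map used to access the PCR register
 * @range:	allowed output rate range (a zero bound means unconstrained)
 * @lock:	PMC lock protecting the PID-select/read-modify-write sequence
 * @mux_table:	optional table translating a parent index to a GCKCSS value
 * @id:		peripheral ID written to the PCR PID field
 * @gckdiv:	cached divisor field; output rate = parent_rate / (gckdiv + 1)
 * @layout:	PCR register layout (offset, masks, command bit)
 * @parent_id:	cached GCKCSS (parent selector) value
 * @chg_pid:	index of the parent whose rate may be changed, or negative
 */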
struct clk_generated {
	struct clk_hw hw;
	struct regmap *regmap;
	struct clk_range range;
	spinlock_t *lock;
	u32 *mux_table;
	u32 id;
	u32 gckdiv;
	const struct clk_pcr_layout *layout;
	u8 parent_id;
	int chg_pid;
};

#define to_clk_generated(hw) \
	container_of(hw, struct clk_generated, hw)

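/*
 * Select the peripheral in the PMC PCR register via its PID, then program the
 * generated clock source (GCKCSS), divisor (GCKDIV) and enable bit (GCKEN) in
 * a single read-modify-write, all under the PMC lock.
 */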
static int clk_generated_enable(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);
	unsigned long flags;

	pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n",
		 __func__, gck->gckdiv, gck->parent_id);

	spin_lock_irqsave(gck->lock, flags);
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_update_bits(gck->regmap, gck->layout->offset,
			   AT91_PMC_PCR_GCKDIV_MASK | gck->layout->gckcss_mask |
			   gck->layout->cmd | AT91_PMC_PCR_GCKEN,
			   field_prep(gck->layout->gckcss_mask, gck->parent_id) |
			   gck->layout->cmd |
			   FIELD_PREP(AT91_PMC_PCR_GCKDIV_MASK, gck->gckdiv) |
			   AT91_PMC_PCR_GCKEN);
	spin_unlock_irqrestore(gck->lock, flags);
	return 0;
}

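/*
 * Same PID-select sequence as enable, but the update clears GCKEN (only the
 * command bit is written back), gating the generated clock output.
 */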
static void clk_generated_disable(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);
	unsigned long flags;

	spin_lock_irqsave(gck->lock, flags);
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_update_bits(gck->regmap, gck->layout->offset,
			   gck->layout->cmd | AT91_PMC_PCR_GCKEN,
			   gck->layout->cmd);
	spin_unlock_irqrestore(gck->lock, flags);
}

static int clk_generated_is_enabled(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);
	unsigned long flags;
	unsigned int status;

	spin_lock_irqsave(gck->lock, flags);
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_read(gck->regmap, gck->layout->offset, &status);
	spin_unlock_irqrestore(gck->lock, flags);

	return !!(status & AT91_PMC_PCR_GCKEN);
}

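/* The generated clock output runs at parent_rate / (gckdiv + 1). */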
static unsigned long
clk_generated_recalc_rate(struct clk_hw *hw,
			  unsigned long parent_rate)
{
	struct clk_generated *gck = to_clk_generated(hw);

	return DIV_ROUND_CLOSEST(parent_rate, gck->gckdiv + 1);
}

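/*
 * Compute the rate obtained from @parent_rate and @div and record it as the
 * new best candidate if it is at least as close to the requested rate as the
 * current best (a negative *best_diff means "no candidate yet").
 */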
static void clk_generated_best_diff(struct clk_rate_request *req,
				    struct clk_hw *parent,
				    unsigned long parent_rate, u32 div,
				    int *best_diff, long *best_rate)
{
	unsigned long tmp_rate;
	int tmp_diff;

	if (!div)
		tmp_rate = parent_rate;
	else
		tmp_rate = parent_rate / div;
	tmp_diff = abs(req->rate - tmp_rate);

	if (*best_diff < 0 || *best_diff >= tmp_diff) {
		*best_rate = tmp_rate;
		*best_diff = tmp_diff;
		req->best_parent_rate = parent_rate;
		req->best_parent_hw = parent;
	}
}

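/*
 * Pick the best parent/divisor combination: first try every fixed-rate parent
 * with the divisor closest to the request, then, if a rate-changeable parent
 * (chg_pid) exists, ask it for req->rate * div for each divisor and keep
 * whichever combination lands closest to the requested rate.
 */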
static int clk_generated_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_generated *gck = to_clk_generated(hw);
	struct clk_hw *parent = NULL;
	struct clk_rate_request req_parent = *req;
	long best_rate = -EINVAL;
	unsigned long min_rate, parent_rate;
	int best_diff = -1;
	int i;
	u32 div;

	/* do not look for a rate that is outside of our range */
	if (gck->range.max && req->rate > gck->range.max)
		req->rate = gck->range.max;
	if (gck->range.min && req->rate < gck->range.min)
		req->rate = gck->range.min;

	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
		if (gck->chg_pid == i)
			continue;

		parent = clk_hw_get_parent_by_index(hw, i);
		if (!parent)
			continue;

		parent_rate = clk_hw_get_rate(parent);
		min_rate = DIV_ROUND_CLOSEST(parent_rate, GENERATED_MAX_DIV + 1);
		if (!parent_rate ||
		    (gck->range.max && min_rate > gck->range.max))
			continue;

		div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
		if (div > GENERATED_MAX_DIV + 1)
			div = GENERATED_MAX_DIV + 1;

		clk_generated_best_diff(req, parent, parent_rate, div,
					&best_diff, &best_rate);

		if (!best_diff)
			break;
	}

	/*
	 * The audio_pll rate can be modified, unlike the five other clocks
	 * that should never be altered.
	 * The audio_pll can technically be used by multiple consumers. However,
	 * with the rate locking, the first consumer to enable the clock will be
	 * the one definitely setting the rate of the clock.
	 * Since audio IPs are most likely to request the same rate, we enforce
	 * that the only clks able to modify the gck rate are those of audio IPs.
	 */

	if (gck->chg_pid < 0)
		goto end;

	parent = clk_hw_get_parent_by_index(hw, gck->chg_pid);
	if (!parent)
		goto end;

	for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
		req_parent.rate = req->rate * div;
		if (__clk_determine_rate(parent, &req_parent))
			continue;
		clk_generated_best_diff(req, parent, req_parent.rate, div,
					&best_diff, &best_rate);

		if (!best_diff)
			break;
	}

end:
	pr_debug("GCLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
		 __func__, best_rate,
		 __clk_get_name((req->best_parent_hw)->clk),
		 req->best_parent_rate);

	if (best_rate < 0 || (gck->range.max && best_rate > gck->range.max))
		return -EINVAL;

	req->rate = best_rate;
	return 0;
}

/* No modification of hardware as we have the flag CLK_SET_PARENT_GATE set */
static int clk_generated_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_generated *gck = to_clk_generated(hw);

	if (index >= clk_hw_get_num_parents(hw))
		return -EINVAL;

	if (gck->mux_table)
		gck->parent_id = clk_mux_index_to_val(gck->mux_table, 0, index);
	else
		gck->parent_id = index;

	return 0;
}

static u8 clk_generated_get_parent(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);

	return gck->parent_id;
}

/* No modification of hardware as we have the flag CLK_SET_RATE_GATE set */
static int clk_generated_set_rate(struct clk_hw *hw,
				  unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_generated *gck = to_clk_generated(hw);
	u32 div;

	if (!rate)
		return -EINVAL;

	if (gck->range.max && rate > gck->range.max)
		return -EINVAL;

	div = DIV_ROUND_CLOSEST(parent_rate, rate);
	if (div > GENERATED_MAX_DIV + 1 || !div)
		return -EINVAL;

	gck->gckdiv = div - 1;
	return 0;
}

static const struct clk_ops generated_ops = {
	.enable = clk_generated_enable,
	.disable = clk_generated_disable,
	.is_enabled = clk_generated_is_enabled,
	.recalc_rate = clk_generated_recalc_rate,
	.determine_rate = clk_generated_determine_rate,
	.get_parent = clk_generated_get_parent,
	.set_parent = clk_generated_set_parent,
	.set_rate = clk_generated_set_rate,
};

/**
 * clk_generated_startup - Initialize a given clock to its default parent and
 * divisor parameter.
 *
 * @gck:	Generated clock to set the startup parameters for.
 *
 * Take parameters from the hardware and update local clock configuration
 * accordingly.
 */
static void clk_generated_startup(struct clk_generated *gck)
{
	u32 tmp;
	unsigned long flags;

	spin_lock_irqsave(gck->lock, flags);
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_read(gck->regmap, gck->layout->offset, &tmp);
	spin_unlock_irqrestore(gck->lock, flags);

	gck->parent_id = field_get(gck->layout->gckcss_mask, tmp);
	gck->gckdiv = FIELD_GET(AT91_PMC_PCR_GCKDIV_MASK, tmp);
}

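/*
 * at91_clk_register_generated - register a PMC generated clock (gck)
 * @regmap:	PMC register map
 * @lock:	PMC lock shared with the other PCR users
 * @layout:	PCR register layout for this SoC
 * @name:	name of the clock
 * @parent_names: array of parent clock names
 * @mux_table:	optional parent-index to GCKCSS translation table
 * @num_parents: number of entries in @parent_names
 * @id:		peripheral ID of the clock
 * @range:	allowed output rate range
 * @chg_pid:	index of the parent whose rate may be changed to satisfy a
 *		rate request (enables CLK_SET_RATE_PARENT), or a negative
 *		value if no parent rate change is allowed
 *
 * Returns the registered clk_hw on success, or an ERR_PTR() on error.
 */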
struct clk_hw * __init
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
			    const struct clk_pcr_layout *layout,
			    const char *name, const char **parent_names,
			    u32 *mux_table, u8 num_parents, u8 id,
			    const struct clk_range *range,
			    int chg_pid)
{
	struct clk_generated *gck;
	struct clk_init_data init;
	struct clk_hw *hw;
	int ret;

	gck = kzalloc(sizeof(*gck), GFP_KERNEL);
	if (!gck)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &generated_ops;
	init.parent_names = parent_names;
	init.num_parents = num_parents;
	init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
	if (chg_pid >= 0)
		init.flags |= CLK_SET_RATE_PARENT;

	gck->id = id;
	gck->hw.init = &init;
	gck->regmap = regmap;
	gck->lock = lock;
	gck->range = *range;
	gck->chg_pid = chg_pid;
	gck->layout = layout;
	gck->mux_table = mux_table;

	clk_generated_startup(gck);
	hw = &gck->hw;
	ret = clk_hw_register(NULL, &gck->hw);
	if (ret) {
		kfree(gck);
		hw = ERR_PTR(ret);
	} else {
		pmc_register_id(id);
	}

	return hw;
}
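
/*
 * Usage sketch (illustrative only, not code from this driver): SoC PMC setup
 * code registers one generated clock per peripheral that needs a gck. All
 * names below (my_regmap, my_pmc_lock, my_pcr_layout, gck_parent_names,
 * MY_GCK_PID) are hypothetical placeholders:
 *
 *	static const struct clk_range gck_range = { .min = 0, .max = 0 };
 *
 *	hw = at91_clk_register_generated(my_regmap, &my_pmc_lock,
 *					 &my_pcr_layout, "foo_gclk",
 *					 gck_parent_names, NULL,
 *					 ARRAY_SIZE(gck_parent_names),
 *					 MY_GCK_PID, &gck_range, -1);
 *	if (IS_ERR(hw))
 *		return PTR_ERR(hw);
 *
 * A NULL @mux_table keeps the identity parent-index mapping, a zero range
 * leaves the rate unconstrained, and chg_pid = -1 means no parent rate
 * propagation.
 */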