Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

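The source below is the Rockchip MMC clock phase provider (drivers/clk/rockchip/clk-mmc-phase.c in the upstream tree). It implements the coarse 90-degree phase selector plus the fine delay chain used for the MMC sample and drive clocks on Rockchip SoCs.
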
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 Google, Inc
 * Author: Alexandru M Stan <amstan@chromium.org>
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include "clk.h"

struct rockchip_mmc_clock {
	struct clk_hw	hw;
	void __iomem	*reg;
	int		id;
	int		shift;
	int		cached_phase;
	struct notifier_block clk_rate_change_nb;
};

#define to_mmc_clock(_hw) container_of(_hw, struct rockchip_mmc_clock, hw)

#define RK3288_MMC_CLKGEN_DIV 2

static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	return parent_rate / RK3288_MMC_CLKGEN_DIV;
}

#define ROCKCHIP_MMC_DELAY_SEL BIT(10)
#define ROCKCHIP_MMC_DEGREE_MASK 0x3
#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)

#define PSECS_PER_SEC 1000000000000LL

/*
 * Each fine delay is between 44ps-77ps. Assume each fine delay is 60ps to
 * simplify calculations. So 45degs could be anywhere between 33deg and 57.8deg.
 */
#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60
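/*
 * At the assumed 60ps per element, one delay element shifts the output by
 * roughly 360 * ROCKCHIP_MMC_DELAY_ELEMENT_PSEC * rate / PSECS_PER_SEC
 * degrees. Illustrative example (rate not taken from this file): at a
 * 150 MHz card clock that is about 3.24 degrees per element, so the 8-bit
 * delay-number field comfortably covers a full 90-degree quadrant.
 */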

static int rockchip_mmc_get_phase(struct clk_hw *hw)
{
	struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
	unsigned long rate = clk_hw_get_rate(hw);
	u32 raw_value;
	u16 degrees;
	u32 delay_num = 0;

	/* Constant signal, no measurable phase shift */
	if (!rate)
		return 0;

	raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);

	degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;

	if (raw_value & ROCKCHIP_MMC_DELAY_SEL) {
		/* degrees/delaynum * 1000000 */
		unsigned long factor = (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10) *
					36 * (rate / 10000);
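		/*
		 * In full: delay_num fine-delay elements shift the phase by
		 * delay_num * ROCKCHIP_MMC_DELAY_ELEMENT_PSEC * rate * 360
		 * / PSECS_PER_SEC degrees. Splitting the divisor into /10 and
		 * /10000 here, plus the /1000000 below, keeps the intermediate
		 * products within 32-bit range for realistic clock rates.
		 */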

		delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
		delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
		degrees += DIV_ROUND_CLOSEST(delay_num * factor, 1000000);
	}

	return degrees % 360;
}

static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
{
	struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
	unsigned long rate = clk_hw_get_rate(hw);
	u8 nineties, remainder;
	u8 delay_num;
	u32 raw_value;
	u32 delay;

	/*
	 * The calculation below is based on the output clock from the
	 * MMC host to the card, which expects that the phase clock inherits
	 * its rate from its parent, namely the output clock provider of
	 * the MMC host. However, things may go wrong if
	 * (1) it is orphaned, or
	 * (2) it is assigned to the wrong parent.
	 *
	 * This check helps debug case (1), which seems to be the most
	 * likely problem we face and which makes it difficult for people
	 * to debug unstable mmc tuning results.
	 */
	if (!rate) {
		pr_err("%s: invalid clk rate\n", __func__);
		return -EINVAL;
	}

	nineties = degrees / 90;
	remainder = (degrees % 90);

	/*
	 * Due to the inexact nature of the "fine" delay, we might
	 * actually go non-monotonic.  We don't go _too_ non-monotonic
	 * though, so we should be OK.  Here are options of how we may
	 * work:
	 *
	 * Ideally we end up with:
	 *   1.0, 2.0, ..., 69.0, 70.0, ...,  89.0, 90.0
	 *
	 * On one extreme (if delay is actually 44ps):
	 *   .73, 1.5, ..., 50.6, 51.3, ...,  65.3, 90.0
	 * The other (if delay is actually 77ps):
	 *   1.3, 2.6, ..., 88.6, 89.8, ..., 114.0, 90
	 *
	 * It's possible we might make a delay that is up to 25
	 * degrees off from what we think we're making.  That's OK
	 * though because we should be REALLY far from any bad range.
	 */

	/*
	 * Convert to delay; do a little extra work to make sure we
	 * don't overflow 32-bit / 64-bit numbers.
	 */
	delay = 10000000; /* PSECS_PER_SEC / 10000 / 10 */
	delay *= remainder;
	delay = DIV_ROUND_CLOSEST(delay,
			(rate / 1000) * 36 *
				(ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10));
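	/*
	 * Worked example with illustrative numbers: remainder = 45 degrees
	 * at rate = 150 MHz gives 45 * 10^7 / (150000 * 36 * 6) ~= 14
	 * delay elements, i.e. about 14 * 60ps = 840ps, which matches
	 * 45/360 of the ~6.7ns clock period.
	 */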

	delay_num = (u8) min_t(u32, delay, 255);

	raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
	raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
	raw_value |= nineties;
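	/*
	 * Rockchip CRU registers use a hi-word write-mask scheme: the upper
	 * 16 bits of the written value select which of the lower 16 bits
	 * actually take effect. HIWORD_UPDATE() (from the local clk.h)
	 * builds such a value, so only the 11-bit phase field at
	 * mmc_clock->shift is touched by this write.
	 */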
	writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift),
	       mmc_clock->reg);

	pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
		clk_hw_get_name(hw), degrees, delay_num,
		mmc_clock->reg, raw_value>>(mmc_clock->shift),
		rockchip_mmc_get_phase(hw)
	);

	return 0;
}

static const struct clk_ops rockchip_mmc_clk_ops = {
	.recalc_rate	= rockchip_mmc_recalc,
	.get_phase	= rockchip_mmc_get_phase,
	.set_phase	= rockchip_mmc_set_phase,
};

#define to_rockchip_mmc_clock(x) \
	container_of(x, struct rockchip_mmc_clock, clk_rate_change_nb)

static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb,
					unsigned long event, void *data)
{
	struct rockchip_mmc_clock *mmc_clock = to_rockchip_mmc_clock(nb);
	struct clk_notifier_data *ndata = data;

	/*
	 * rockchip_mmc_clk is mostly used by mmc controllers to sample
	 * the input data, which expects a fixed phase after the tuning
	 * process. However, if the clock rate is changed, the phase is stale
	 * and may break the data sampling. So here we try to restore the phase
	 * for that case, except when
	 * (1) cached_phase is invalid, since we inevitably cache it when the
	 * clock provider is reparented from the orphan list to its real parent
	 * in the first place. Otherwise we may mess up the initialization of
	 * MMC cards, since we only set the default sample and drive phases
	 * later on.
	 * (2) the new rate is higher than the old one. The mmc driver sets
	 * max-frequency to match the board's ability and we can't go over
	 * that, otherwise the tests smoke out the issue.
	 */
	if (ndata->old_rate <= ndata->new_rate)
		return NOTIFY_DONE;

	if (event == PRE_RATE_CHANGE)
		mmc_clock->cached_phase =
			rockchip_mmc_get_phase(&mmc_clock->hw);
	else if (mmc_clock->cached_phase != -EINVAL &&
		 event == POST_RATE_CHANGE)
		rockchip_mmc_set_phase(&mmc_clock->hw, mmc_clock->cached_phase);

	return NOTIFY_DONE;
}

struct clk *rockchip_clk_register_mmc(const char *name,
				const char *const *parent_names, u8 num_parents,
				void __iomem *reg, int shift)
{
	struct clk_init_data init;
	struct rockchip_mmc_clock *mmc_clock;
	struct clk *clk;
	int ret;

	mmc_clock = kmalloc(sizeof(*mmc_clock), GFP_KERNEL);
	if (!mmc_clock)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.flags = 0;
	init.num_parents = num_parents;
	init.parent_names = parent_names;
	init.ops = &rockchip_mmc_clk_ops;

	mmc_clock->hw.init = &init;
	mmc_clock->reg = reg;
	mmc_clock->shift = shift;

	clk = clk_register(NULL, &mmc_clock->hw);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto err_register;
	}

	mmc_clock->clk_rate_change_nb.notifier_call =
				&rockchip_mmc_clk_rate_notify;
	ret = clk_notifier_register(clk, &mmc_clock->clk_rate_change_nb);
	if (ret)
		goto err_notifier;

	return clk;
err_notifier:
	clk_unregister(clk);
err_register:
	kfree(mmc_clock);
	return ERR_PTR(ret);
}
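
In the Rockchip clock drivers this helper is normally reached from the per-SoC branch tables rather than called directly. As a minimal sketch of the call itself (the clock name, parent name, register offset and shift below are hypothetical, chosen only to illustrate the signature exported above):

/* Hypothetical usage sketch; names, offset and shift are illustrative only. */
static void __init example_register_sdmmc_sample(void __iomem *cru_base)
{
	static const char *const parents[] = { "sclk_sdmmc" };
	struct clk *clk;

	/* 0x0400 and shift 0 are made-up values, not from any real board. */
	clk = rockchip_clk_register_mmc("sdmmc_sample", parents,
					ARRAY_SIZE(parents),
					cru_base + 0x0400, 0);
	if (IS_ERR(clk))
		pr_err("failed to register sdmmc_sample: %ld\n", PTR_ERR(clk));
}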