Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Authors:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *   Serge Semin <Sergey.Semin@baikalelectronics.ru>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *   Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * Baikal-T1 CCU Dividers interface driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #define pr_fmt(fmt) "bt1-ccu-div: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/printk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/bits.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/bitfield.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/clk-provider.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/regmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <linux/time64.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include "ccu-div.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #define CCU_DIV_CTL			0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) #define CCU_DIV_CTL_EN			BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #define CCU_DIV_CTL_RST			BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #define CCU_DIV_CTL_SET_CLKDIV		BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) #define CCU_DIV_CTL_CLKDIV_FLD		4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #define CCU_DIV_CTL_CLKDIV_MASK(_width) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) #define CCU_DIV_CTL_LOCK_SHIFTED	BIT(27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) #define CCU_DIV_CTL_LOCK_NORMAL		BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) #define CCU_DIV_RST_DELAY_US		1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) #define CCU_DIV_LOCK_CHECK_RETRIES	50
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) #define CCU_DIV_CLKDIV_MIN		0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) #define CCU_DIV_CLKDIV_MAX(_mask) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	((_mask) >> CCU_DIV_CTL_CLKDIV_FLD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47)  * Use the next two methods until there are generic field setter and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48)  * getter available with non-constant mask support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) static inline u32 ccu_div_get(u32 mask, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 	return (val & mask) >> CCU_DIV_CTL_CLKDIV_FLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) static inline u32 ccu_div_prep(u32 mask, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	return (val << CCU_DIV_CTL_CLKDIV_FLD) & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) static inline unsigned long ccu_div_lock_delay_ns(unsigned long ref_clk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 						  unsigned long div)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	u64 ns = 4ULL * (div ?: 1) * NSEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	do_div(ns, ref_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	return ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) static inline unsigned long ccu_div_calc_freq(unsigned long ref_clk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 					      unsigned long div)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	return ref_clk / (div ?: 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) static int ccu_div_var_update_clkdiv(struct ccu_div *div,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 				     unsigned long parent_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 				     unsigned long divider)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	unsigned long nd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	u32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	u32 lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	nd = ccu_div_lock_delay_ns(parent_rate, divider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	if (div->features & CCU_DIV_LOCK_SHIFTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 		lock = CCU_DIV_CTL_LOCK_SHIFTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 		lock = CCU_DIV_CTL_LOCK_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	regmap_update_bits(div->sys_regs, div->reg_ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 			   CCU_DIV_CTL_SET_CLKDIV, CCU_DIV_CTL_SET_CLKDIV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	 * Until there is nsec-version of readl_poll_timeout() is available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	 * we have to implement the next polling loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	count = CCU_DIV_LOCK_CHECK_RETRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 		ndelay(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 		regmap_read(div->sys_regs, div->reg_ctl, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 		if (val & lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	} while (--count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) static int ccu_div_var_enable(struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	struct ccu_div *div = to_ccu_div(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	u32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	if (!parent_hw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 		pr_err("Can't enable '%s' with no parent", clk_hw_get_name(hw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	regmap_read(div->sys_regs, div->reg_ctl, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	if (val & CCU_DIV_CTL_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	spin_lock_irqsave(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	ret = ccu_div_var_update_clkdiv(div, clk_hw_get_rate(parent_hw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 					ccu_div_get(div->mask, val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 		regmap_update_bits(div->sys_regs, div->reg_ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 				   CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	spin_unlock_irqrestore(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 		pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) static int ccu_div_gate_enable(struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	struct ccu_div *div = to_ccu_div(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	spin_lock_irqsave(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	regmap_update_bits(div->sys_regs, div->reg_ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 			   CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	spin_unlock_irqrestore(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) static void ccu_div_gate_disable(struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	struct ccu_div *div = to_ccu_div(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	spin_lock_irqsave(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	regmap_update_bits(div->sys_regs, div->reg_ctl, CCU_DIV_CTL_EN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	spin_unlock_irqrestore(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) static int ccu_div_gate_is_enabled(struct clk_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	struct ccu_div *div = to_ccu_div(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	u32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	regmap_read(div->sys_regs, div->reg_ctl, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	return !!(val & CCU_DIV_CTL_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 					     unsigned long parent_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	struct ccu_div *div = to_ccu_div(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	unsigned long divider;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	u32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	regmap_read(div->sys_regs, div->reg_ctl, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	divider = ccu_div_get(div->mask, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 	return ccu_div_calc_freq(parent_rate, divider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 						     unsigned long parent_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 						     unsigned int mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	unsigned long divider;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 	divider = parent_rate / rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 	return clamp_t(unsigned long, divider, CCU_DIV_CLKDIV_MIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 		       CCU_DIV_CLKDIV_MAX(mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 				   unsigned long *parent_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	struct ccu_div *div = to_ccu_div(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	unsigned long divider;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	return ccu_div_calc_freq(*parent_rate, divider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)  * This method is used for the clock divider blocks, which support the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)  * on-the-fly rate change. So due to lacking the EN bit functionality
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)  * they can't be gated before the rate adjustment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) static int ccu_div_var_set_rate_slow(struct clk_hw *hw, unsigned long rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 				     unsigned long parent_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	struct ccu_div *div = to_ccu_div(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	unsigned long flags, divider;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	if (divider == 1 && div->features & CCU_DIV_SKIP_ONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 		divider = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	} else if (div->features & CCU_DIV_SKIP_ONE_TO_THREE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 		if (divider == 1 || divider == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 			divider = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 		else if (divider == 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 			divider = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	val = ccu_div_prep(div->mask, divider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	spin_lock_irqsave(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	ret = ccu_div_var_update_clkdiv(div, parent_rate, divider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	spin_unlock_irqrestore(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 		pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)  * This method is used for the clock divider blocks, which don't support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)  * the on-the-fly rate change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) static int ccu_div_var_set_rate_fast(struct clk_hw *hw, unsigned long rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 				     unsigned long parent_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	struct ccu_div *div = to_ccu_div(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 	unsigned long flags, divider;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 	divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	val = ccu_div_prep(div->mask, divider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 	 * Also disable the clock divider block if it was enabled by default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 	 * or by the bootloader.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 	spin_lock_irqsave(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 	regmap_update_bits(div->sys_regs, div->reg_ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 			   div->mask | CCU_DIV_CTL_EN, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	spin_unlock_irqrestore(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 					       unsigned long parent_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	struct ccu_div *div = to_ccu_div(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	return ccu_div_calc_freq(parent_rate, div->divider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 				     unsigned long *parent_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 	struct ccu_div *div = to_ccu_div(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 	return ccu_div_calc_freq(*parent_rate, div->divider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 				  unsigned long parent_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) int ccu_div_reset_domain(struct ccu_div *div)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 	if (!div || !(div->features & CCU_DIV_RESET_DOMAIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	spin_lock_irqsave(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	regmap_update_bits(div->sys_regs, div->reg_ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 			   CCU_DIV_CTL_RST, CCU_DIV_CTL_RST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 	spin_unlock_irqrestore(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	/* The next delay must be enough to cover all the resets. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 	udelay(CCU_DIV_RST_DELAY_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) struct ccu_div_dbgfs_bit {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	struct ccu_div *div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 	u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) #define CCU_DIV_DBGFS_BIT_ATTR(_name, _mask) {	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 		.name = _name,			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 		.mask = _mask			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) static const struct ccu_div_dbgfs_bit ccu_div_bits[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 	CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 	CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 	CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) #define CCU_DIV_DBGFS_BIT_NUM	ARRAY_SIZE(ccu_div_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)  * It can be dangerous to change the Divider settings behind clock framework
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)  * back, therefore we don't provide any kernel config based compile time option
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)  * for this feature to enable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) #undef CCU_DIV_ALLOW_WRITE_DEBUGFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) #ifdef CCU_DIV_ALLOW_WRITE_DEBUGFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) static int ccu_div_dbgfs_bit_set(void *priv, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	const struct ccu_div_dbgfs_bit *bit = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	struct ccu_div *div = bit->div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 	spin_lock_irqsave(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	regmap_update_bits(div->sys_regs, div->reg_ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 			   bit->mask, val ? bit->mask : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 	spin_unlock_irqrestore(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) static int ccu_div_dbgfs_var_clkdiv_set(void *priv, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 	struct ccu_div *div = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 	u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 	val = clamp_t(u64, val, CCU_DIV_CLKDIV_MIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 		      CCU_DIV_CLKDIV_MAX(div->mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 	data = ccu_div_prep(div->mask, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 	spin_lock_irqsave(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 	regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 	spin_unlock_irqrestore(&div->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) #define ccu_div_dbgfs_mode		0644
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) #else /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) #define ccu_div_dbgfs_bit_set		NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) #define ccu_div_dbgfs_var_clkdiv_set	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) #define ccu_div_dbgfs_mode		0444
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) #endif /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) static int ccu_div_dbgfs_bit_get(void *priv, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 	const struct ccu_div_dbgfs_bit *bit = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 	struct ccu_div *div = bit->div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 	u32 data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 	regmap_read(div->sys_regs, div->reg_ctl, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 	*val = !!(data & bit->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_bit_fops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 	ccu_div_dbgfs_bit_get, ccu_div_dbgfs_bit_set, "%llu\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) static int ccu_div_dbgfs_var_clkdiv_get(void *priv, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 	struct ccu_div *div = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 	u32 data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 	regmap_read(div->sys_regs, div->reg_ctl, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 	*val = ccu_div_get(div->mask, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_var_clkdiv_fops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 	ccu_div_dbgfs_var_clkdiv_get, ccu_div_dbgfs_var_clkdiv_set, "%llu\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) static int ccu_div_dbgfs_fixed_clkdiv_get(void *priv, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 	struct ccu_div *div = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 	*val = div->divider;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_fixed_clkdiv_fops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 	ccu_div_dbgfs_fixed_clkdiv_get, NULL, "%llu\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 	struct ccu_div *div = to_ccu_div(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 	struct ccu_div_dbgfs_bit *bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 	int didx, bidx, num = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 	const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 	num += !!(div->flags & CLK_SET_RATE_GATE) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 		!!(div->features & CCU_DIV_RESET_DOMAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 	bits = kcalloc(num, sizeof(*bits), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 	if (!bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	for (didx = 0, bidx = 0; bidx < CCU_DIV_DBGFS_BIT_NUM; ++bidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 		name = ccu_div_bits[bidx].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 		if (!(div->flags & CLK_SET_RATE_GATE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 		    !strcmp("div_en", name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 		if (!(div->features & CCU_DIV_RESET_DOMAIN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 		    !strcmp("div_rst", name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 		bits[didx] = ccu_div_bits[bidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 		bits[didx].div = div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 		if (div->features & CCU_DIV_LOCK_SHIFTED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 		    !strcmp("div_lock", name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 			bits[didx].mask = CCU_DIV_CTL_LOCK_SHIFTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 		debugfs_create_file_unsafe(bits[didx].name, ccu_div_dbgfs_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 					   dentry, &bits[didx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 					   &ccu_div_dbgfs_bit_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 		++didx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	debugfs_create_file_unsafe("div_clkdiv", ccu_div_dbgfs_mode, dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 				   div, &ccu_div_dbgfs_var_clkdiv_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 	struct ccu_div *div = to_ccu_div(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 	struct ccu_div_dbgfs_bit *bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 	bit = kmalloc(sizeof(*bit), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 	if (!bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 	*bit = ccu_div_bits[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	bit->div = div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 	debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 				   &ccu_div_dbgfs_bit_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 	debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 				   &ccu_div_dbgfs_fixed_clkdiv_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	struct ccu_div *div = to_ccu_div(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 				   &ccu_div_dbgfs_fixed_clkdiv_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) #else /* !CONFIG_DEBUG_FS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) #define ccu_div_var_debug_init NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) #define ccu_div_gate_debug_init NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) #define ccu_div_fixed_debug_init NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) #endif /* !CONFIG_DEBUG_FS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 
/*
 * Ops for a variable divider registered with CLK_SET_RATE_GATE (see the
 * type selection in ccu_div_hw_register()): gate control is available
 * and the "fast" set-rate path is used.
 */
static const struct clk_ops ccu_div_var_gate_to_set_ops = {
	.enable = ccu_div_var_enable,
	.disable = ccu_div_gate_disable,
	.is_enabled = ccu_div_gate_is_enabled,
	.recalc_rate = ccu_div_var_recalc_rate,
	.round_rate = ccu_div_var_round_rate,
	.set_rate = ccu_div_var_set_rate_fast,
	.debug_init = ccu_div_var_debug_init
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 
/*
 * Ops for a variable divider without CLK_SET_RATE_GATE: no gate control,
 * rate is changed via the "slow" set-rate variant.
 */
static const struct clk_ops ccu_div_var_nogate_ops = {
	.recalc_rate = ccu_div_var_recalc_rate,
	.round_rate = ccu_div_var_round_rate,
	.set_rate = ccu_div_var_set_rate_slow,
	.debug_init = ccu_div_var_debug_init
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 
/*
 * Ops for a gateable clock with a fixed divider: full gate control,
 * rate callbacks are the fixed-divider variants.
 */
static const struct clk_ops ccu_div_gate_ops = {
	.enable = ccu_div_gate_enable,
	.disable = ccu_div_gate_disable,
	.is_enabled = ccu_div_gate_is_enabled,
	.recalc_rate = ccu_div_fixed_recalc_rate,
	.round_rate = ccu_div_fixed_round_rate,
	.set_rate = ccu_div_fixed_set_rate,
	.debug_init = ccu_div_gate_debug_init
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 
/* Ops for a fixed divider with no gate: rate callbacks only. */
static const struct clk_ops ccu_div_fixed_ops = {
	.recalc_rate = ccu_div_fixed_recalc_rate,
	.round_rate = ccu_div_fixed_round_rate,
	.set_rate = ccu_div_fixed_set_rate,
	.debug_init = ccu_div_fixed_debug_init
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	struct clk_parent_data parent_data = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 	struct clk_init_data hw_init = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	struct ccu_div *div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	if (!div_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	div = kzalloc(sizeof(*div), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	if (!div)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 	 * Note since Baikal-T1 System Controller registers are MMIO-backed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 	 * we won't check the regmap IO operations return status, because it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 	 * must be zero anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 	div->hw.init = &hw_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 	div->id = div_init->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 	div->reg_ctl = div_init->base + CCU_DIV_CTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 	div->sys_regs = div_init->sys_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	div->flags = div_init->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	div->features = div_init->features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	spin_lock_init(&div->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	hw_init.name = div_init->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 	hw_init.flags = div_init->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	if (div_init->type == CCU_DIV_VAR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 		if (hw_init.flags & CLK_SET_RATE_GATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 			hw_init.ops = &ccu_div_var_gate_to_set_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 			hw_init.ops = &ccu_div_var_nogate_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 		div->mask = CCU_DIV_CTL_CLKDIV_MASK(div_init->width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 	} else if (div_init->type == CCU_DIV_GATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 		hw_init.ops = &ccu_div_gate_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 		div->divider = div_init->divider;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	} else if (div_init->type == CCU_DIV_FIXED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 		hw_init.ops = &ccu_div_fixed_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 		div->divider = div_init->divider;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 		goto err_free_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 	if (!div_init->parent_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 		goto err_free_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	parent_data.fw_name = div_init->parent_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 	hw_init.parent_data = &parent_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	hw_init.num_parents = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 	ret = of_clk_hw_register(div_init->np, &div->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 		goto err_free_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 	return div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) err_free_div:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 	kfree(div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 
/*
 * Unregister the divider clock and free the descriptor allocated by
 * ccu_div_hw_register(). The clock must be removed from the CCF core
 * before its backing memory is released, hence the call order.
 */
void ccu_div_hw_unregister(struct ccu_div *div)
{
	clk_hw_unregister(&div->hw);

	kfree(div);
}