Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

SuperH CMT timer driver source (sh_cmt); every line below comes from a single commit, 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300).

// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH Timer Support - CMT
 *
 *  Copyright (C) 2008 Magnus Damm
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#ifdef CONFIG_SUPERH
#include <asm/platform_early.h>
#endif

struct sh_cmt_device;

/*
 * The CMT comes in 5 different identified flavours, depending not only on the
 * SoC but also on the particular instance. The following table lists the main
 * characteristics of those flavours.
 *
 *			16B	32B	32B-F	48B	R-Car Gen2
 * -----------------------------------------------------------------------------
 * Channels		2	1/4	1	6	2/8
 * Control Width	16	16	16	16	32
 * Counter Width	16	32	32	32/48	32/48
 * Shared Start/Stop	Y	Y	Y	Y	N
 *
 * The r8a73a4 / R-Car Gen2 version has a per-channel start/stop register
 * located in the channel registers block. All other versions have a shared
 * start/stop register located in the global space.
 *
 * Channels are indexed from 0 to N-1 in the documentation. The channel index
 * infers the start/stop bit position in the control register and the channel
 * registers block address. Some CMT instances have a subset of channels
 * available, in which case the index in the documentation doesn't match the
 * "real" index as implemented in hardware. This is for instance the case with
 * CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0
 * in the documentation but using start/stop bit 5 and having its registers
 * block at 0x60.
 *
 * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit
 * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable.
 */

enum sh_cmt_model {
	SH_CMT_16BIT,
	SH_CMT_32BIT,
	SH_CMT_48BIT,
	SH_CMT0_RCAR_GEN2,
	SH_CMT1_RCAR_GEN2,
};

struct sh_cmt_info {
	enum sh_cmt_model model;

	unsigned int channels_mask;

	unsigned long width; /* 16 or 32 bit version of hardware block */
	u32 overflow_bit;
	u32 clear_bits;

	/* callbacks for CMSTR and CMCSR access */
	u32 (*read_control)(void __iomem *base, unsigned long offs);
	void (*write_control)(void __iomem *base, unsigned long offs,
			      u32 value);

	/* callbacks for CMCNT and CMCOR access */
	u32 (*read_count)(void __iomem *base, unsigned long offs);
	void (*write_count)(void __iomem *base, unsigned long offs, u32 value);
};

struct sh_cmt_channel {
	struct sh_cmt_device *cmt;

	unsigned int index;	/* Index in the documentation */
	unsigned int hwidx;	/* Real hardware index */

	void __iomem *iostart;
	void __iomem *ioctrl;

	unsigned int timer_bit;
	unsigned long flags;
	u32 match_value;
	u32 next_match_value;
	u32 max_match_value;
	raw_spinlock_t lock;
	struct clock_event_device ced;
	struct clocksource cs;
	u64 total_cycles;
	bool cs_enabled;
};

struct sh_cmt_device {
	struct platform_device *pdev;

	const struct sh_cmt_info *info;

	void __iomem *mapbase;
	struct clk *clk;
	unsigned long rate;

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_cmt_channel *channels;
	unsigned int num_channels;
	unsigned int hw_channels;

	bool has_clockevent;
	bool has_clocksource;
};

#define SH_CMT16_CMCSR_CMF		(1 << 7)
#define SH_CMT16_CMCSR_CMIE		(1 << 6)
#define SH_CMT16_CMCSR_CKS8		(0 << 0)
#define SH_CMT16_CMCSR_CKS32		(1 << 0)
#define SH_CMT16_CMCSR_CKS128		(2 << 0)
#define SH_CMT16_CMCSR_CKS512		(3 << 0)
#define SH_CMT16_CMCSR_CKS_MASK		(3 << 0)

#define SH_CMT32_CMCSR_CMF		(1 << 15)
#define SH_CMT32_CMCSR_OVF		(1 << 14)
#define SH_CMT32_CMCSR_WRFLG		(1 << 13)
#define SH_CMT32_CMCSR_STTF		(1 << 12)
#define SH_CMT32_CMCSR_STPF		(1 << 11)
#define SH_CMT32_CMCSR_SSIE		(1 << 10)
#define SH_CMT32_CMCSR_CMS		(1 << 9)
#define SH_CMT32_CMCSR_CMM		(1 << 8)
#define SH_CMT32_CMCSR_CMTOUT_IE	(1 << 7)
#define SH_CMT32_CMCSR_CMR_NONE		(0 << 4)
#define SH_CMT32_CMCSR_CMR_DMA		(1 << 4)
#define SH_CMT32_CMCSR_CMR_IRQ		(2 << 4)
#define SH_CMT32_CMCSR_CMR_MASK		(3 << 4)
#define SH_CMT32_CMCSR_DBGIVD		(1 << 3)
#define SH_CMT32_CMCSR_CKS_RCLK8	(4 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK32	(5 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK128	(6 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK1	(7 << 0)
#define SH_CMT32_CMCSR_CKS_MASK		(7 << 0)

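/*
 * Register accessors: the CMT exposes either 16-bit or 32-bit registers
 * depending on the variant, so "offs" is a register index that is scaled
 * to a byte offset by the access width (<< 1 for 16-bit, << 2 for 32-bit).
 */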
static u32 sh_cmt_read16(void __iomem *base, unsigned long offs)
{
	return ioread16(base + (offs << 1));
}

static u32 sh_cmt_read32(void __iomem *base, unsigned long offs)
{
	return ioread32(base + (offs << 2));
}

static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value)
{
	iowrite16(value, base + (offs << 1));
}

static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value)
{
	iowrite32(value, base + (offs << 2));
}

static const struct sh_cmt_info sh_cmt_info[] = {
	[SH_CMT_16BIT] = {
		.model = SH_CMT_16BIT,
		.width = 16,
		.overflow_bit = SH_CMT16_CMCSR_CMF,
		.clear_bits = ~SH_CMT16_CMCSR_CMF,
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read16,
		.write_count = sh_cmt_write16,
	},
	[SH_CMT_32BIT] = {
		.model = SH_CMT_32BIT,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_48BIT] = {
		.model = SH_CMT_48BIT,
		.channels_mask = 0x3f,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT0_RCAR_GEN2] = {
		.model = SH_CMT0_RCAR_GEN2,
		.channels_mask = 0x60,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT1_RCAR_GEN2] = {
		.model = SH_CMT1_RCAR_GEN2,
		.channels_mask = 0xff,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
};

#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */

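/*
 * CMSTR lives at offset 0 of the per-channel block (ch->iostart) on variants
 * with per-channel start/stop registers, and at offset 0 of the shared
 * mapbase otherwise; the helpers below pick the right base automatically.
 */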
static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
{
	if (ch->iostart)
		return ch->cmt->info->read_control(ch->iostart, 0);
	else
		return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
}

static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
{
	if (ch->iostart)
		ch->cmt->info->write_control(ch->iostart, 0, value);
	else
		ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
}

static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
{
	return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
}

static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
{
	ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
}

static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
{
	return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
}

static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
{
	ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
}

static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
{
	ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
}

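/*
 * Read the free-running counter together with the overflow flag. CMCNT is
 * sampled three times, bracketed by two CMCSR reads; the middle sample is
 * only accepted once the three values are consistently ordered and the
 * overflow flag did not change, which filters out reads that race with a
 * counter update.
 */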
static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
{
	u32 v1, v2, v3;
	u32 o1, o2;

	o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;

	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
	do {
		o2 = o1;
		v1 = sh_cmt_read_cmcnt(ch);
		v2 = sh_cmt_read_cmcnt(ch);
		v3 = sh_cmt_read_cmcnt(ch);
		o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

	*has_wrapped = o1;
	return v2;
}

static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
{
	unsigned long flags;
	u32 value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->cmt->lock, flags);
	value = sh_cmt_read_cmstr(ch);

	if (start)
		value |= 1 << ch->timer_bit;
	else
		value &= ~(1 << ch->timer_bit);

	sh_cmt_write_cmstr(ch, value);
	raw_spin_unlock_irqrestore(&ch->cmt->lock, flags);
}

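/*
 * Channel power-up sequence: take a runtime PM reference, enable the
 * functional clock, force the channel off, program the mode bits (periodic
 * compare-match, interrupt enable, clock divider), arm CMCOR with the
 * maximum timeout and clear CMCNT, then poll briefly for the CMCNT write
 * to take effect before finally starting the channel.
 */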
static int sh_cmt_enable(struct sh_cmt_channel *ch)
{
	int k, ret;

	pm_runtime_get_sync(&ch->cmt->pdev->dev);
	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(ch->cmt->clk);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		goto err0;
	}

	/* make sure channel is disabled */
	sh_cmt_start_stop_ch(ch, 0);

	/* configure channel, periodic mode and maximum timeout */
	if (ch->cmt->info->width == 16) {
		sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
				   SH_CMT16_CMCSR_CKS512);
	} else {
		sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM |
				   SH_CMT32_CMCSR_CMTOUT_IE |
				   SH_CMT32_CMCSR_CMR_IRQ |
				   SH_CMT32_CMCSR_CKS_RCLK8);
	}

	sh_cmt_write_cmcor(ch, 0xffffffff);
	sh_cmt_write_cmcnt(ch, 0);

	/*
	 * According to the sh73a0 user's manual, as CMCNT can be operated
	 * only by the RCLK (Pseudo 32 kHz), there's one restriction on
	 * modifying CMCNT register; two RCLK cycles are necessary before
	 * this register is either read or any modification of the value
	 * it holds is reflected in the LSI's actual operation.
	 *
	 * While at it, we're supposed to clear out the CMCNT as of this
	 * moment, so make sure it's processed properly here.  This will
	 * take RCLKx2 at maximum.
	 */
	for (k = 0; k < 100; k++) {
		if (!sh_cmt_read_cmcnt(ch))
			break;
		udelay(1);
	}

	if (sh_cmt_read_cmcnt(ch)) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
			ch->index);
		ret = -ETIMEDOUT;
		goto err1;
	}

	/* enable channel */
	sh_cmt_start_stop_ch(ch, 1);
	return 0;
 err1:
	/* stop clock */
	clk_disable(ch->cmt->clk);

 err0:
	return ret;
}

static void sh_cmt_disable(struct sh_cmt_channel *ch)
{
	/* disable channel */
	sh_cmt_start_stop_ch(ch, 0);

	/* disable interrupts in CMT block */
	sh_cmt_write_cmcsr(ch, 0);

	/* stop clock */
	clk_disable(ch->cmt->clk);

	dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
	pm_runtime_put(&ch->cmt->pdev->dev);
}

/* private flags */
#define FLAG_CLOCKEVENT (1 << 0)
#define FLAG_CLOCKSOURCE (1 << 1)
#define FLAG_REPROGRAM (1 << 2)
#define FLAG_SKIPEVENT (1 << 3)
#define FLAG_IRQCONTEXT (1 << 4)

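/*
 * Program a new compare-match value and verify it landed safely. Because the
 * counter keeps running while CMCOR is written, the new match may already be
 * in the past by the time it takes effect; in that case the delay is doubled
 * and the write retried. A wrap observed during programming is left to the
 * interrupt handler to sort out (FLAG_SKIPEVENT).
 */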
static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
					      int absolute)
{
	u32 value = ch->next_match_value;
	u32 new_match;
	u32 delay = 0;
	u32 now = 0;
	u32 has_wrapped;

	now = sh_cmt_get_counter(ch, &has_wrapped);
	ch->flags |= FLAG_REPROGRAM; /* force reprogram */

	if (has_wrapped) {
		/* we're competing with the interrupt handler.
		 *  -> let the interrupt handler reprogram the timer.
		 *  -> interrupt number two handles the event.
		 */
		ch->flags |= FLAG_SKIPEVENT;
		return;
	}

	if (absolute)
		now = 0;

	do {
		/* reprogram the timer hardware,
		 * but don't save the new match value yet.
		 */
		new_match = now + value + delay;
		if (new_match > ch->max_match_value)
			new_match = ch->max_match_value;

		sh_cmt_write_cmcor(ch, new_match);

		now = sh_cmt_get_counter(ch, &has_wrapped);
		if (has_wrapped && (new_match > ch->match_value)) {
			/* we are changing to a greater match value,
			 * so this wrap must be caused by the counter
			 * matching the old value.
			 * -> first interrupt reprograms the timer.
			 * -> interrupt number two handles the event.
			 */
			ch->flags |= FLAG_SKIPEVENT;
			break;
		}

		if (has_wrapped) {
			/* we are changing to a smaller match value,
			 * so the wrap must be caused by the counter
			 * matching the new value.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* be safe: verify hardware settings */
		if (now < new_match) {
			/* timer value is below match value, all good.
			 * this makes sure we won't miss any match events.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* the counter has reached a value greater
		 * than our new match value. and since the
		 * has_wrapped flag isn't set we must have
		 * programmed a too close event.
		 * -> increase delay and retry.
		 */
		if (delay)
			delay <<= 1;
		else
			delay = 1;

		if (!delay)
			dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
				 ch->index);

	} while (delay);
}

static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	if (delta > ch->max_match_value)
		dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
			 ch->index);

	ch->next_match_value = delta;
	sh_cmt_clock_event_program_verify(ch, 0);
}

static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);
	__sh_cmt_set_next(ch, delta);
	raw_spin_unlock_irqrestore(&ch->lock, flags);
}

static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
	struct sh_cmt_channel *ch = dev_id;

	/* clear flags */
	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
			   ch->cmt->info->clear_bits);

	/* update clock source counter to begin with if enabled
	 * the wrap flag should be cleared by the timer specific
	 * isr before we end up here.
	 */
	if (ch->flags & FLAG_CLOCKSOURCE)
		ch->total_cycles += ch->match_value + 1;

	if (!(ch->flags & FLAG_REPROGRAM))
		ch->next_match_value = ch->max_match_value;

	ch->flags |= FLAG_IRQCONTEXT;

	if (ch->flags & FLAG_CLOCKEVENT) {
		if (!(ch->flags & FLAG_SKIPEVENT)) {
			if (clockevent_state_oneshot(&ch->ced)) {
				ch->next_match_value = ch->max_match_value;
				ch->flags |= FLAG_REPROGRAM;
			}

			ch->ced.event_handler(&ch->ced);
		}
	}

	ch->flags &= ~FLAG_SKIPEVENT;

	if (ch->flags & FLAG_REPROGRAM) {
		ch->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(ch, 1);

		if (ch->flags & FLAG_CLOCKEVENT)
			if ((clockevent_state_shutdown(&ch->ced))
			    || (ch->match_value == ch->next_match_value))
				ch->flags &= ~FLAG_REPROGRAM;
	}

	ch->flags &= ~FLAG_IRQCONTEXT;

	return IRQ_HANDLED;
}

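/*
 * A channel may serve as clock event device and clock source at the same
 * time; sh_cmt_start()/sh_cmt_stop() only touch the hardware when the first
 * user arrives or the last one leaves, tracked via FLAG_CLOCKEVENT and
 * FLAG_CLOCKSOURCE.
 */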
static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);

	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		ret = sh_cmt_enable(ch);

	if (ret)
		goto out;
	ch->flags |= flag;

	/* setup timeout if no clockevent */
	if (ch->cmt->num_channels == 1 &&
	    flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
		__sh_cmt_set_next(ch, ch->max_match_value);
 out:
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return ret;
}

static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
{
	unsigned long flags;
	unsigned long f;

	raw_spin_lock_irqsave(&ch->lock, flags);

	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
	ch->flags &= ~flag;

	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		sh_cmt_disable(ch);

	/* adjust the timeout to maximum if only clocksource left */
	if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
		__sh_cmt_set_next(ch, ch->max_match_value);

	raw_spin_unlock_irqrestore(&ch->lock, flags);
}

static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
{
	return container_of(cs, struct sh_cmt_channel, cs);
}

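/*
 * Clocksource read: on single-channel devices the interrupt handler
 * accumulates full periods in total_cycles, so the returned value is
 * total_cycles plus the current raw count (corrected by one period if an
 * unprocessed wrap is pending). Multi-channel devices just return the raw
 * hardware counter.
 */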
static u64 sh_cmt_clocksource_read(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
	u32 has_wrapped;

	if (ch->cmt->num_channels == 1) {
		unsigned long flags;
		u64 value;
		u32 raw;

		raw_spin_lock_irqsave(&ch->lock, flags);
		value = ch->total_cycles;
		raw = sh_cmt_get_counter(ch, &has_wrapped);

		if (unlikely(has_wrapped))
			raw += ch->match_value + 1;
		raw_spin_unlock_irqrestore(&ch->lock, flags);

		return value + raw;
	}

	return sh_cmt_get_counter(ch, &has_wrapped);
}

static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
	int ret;
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(ch->cs_enabled);

	ch->total_cycles = 0;

	ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
	if (!ret)
		ch->cs_enabled = true;

	return ret;
}

static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(!ch->cs_enabled);

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	ch->cs_enabled = false;
}

static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	if (!ch->cs_enabled)
		return;

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
}

static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	if (!ch->cs_enabled)
		return;

	dev_pm_genpd_resume(&ch->cmt->pdev->dev);
	sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}

static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 125;
	cs->read = sh_cmt_clocksource_read;
	cs->enable = sh_cmt_clocksource_enable;
	cs->disable = sh_cmt_clocksource_disable;
	cs->suspend = sh_cmt_clocksource_suspend;
	cs->resume = sh_cmt_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(ch->cmt->info->width);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	clocksource_register_hz(cs, ch->cmt->rate);
	return 0;
}

static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_cmt_channel, ced);
}

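/*
 * For periodic mode the match value is rate/HZ rounded to nearest, minus
 * one because the counter counts from 0 up to and including CMCOR, i.e. a
 * period of CMCOR + 1 input clock cycles.
 */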
static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
{
	sh_cmt_start(ch, FLAG_CLOCKEVENT);

	if (periodic)
		sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
	else
		sh_cmt_set_next(ch, ch->max_match_value);
}

static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	sh_cmt_stop(ch, FLAG_CLOCKEVENT);
	return 0;
}

static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
					int periodic)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	/* deal with old setting first */
	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_cmt_stop(ch, FLAG_CLOCKEVENT);

	dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
		 ch->index, periodic ? "periodic" : "oneshot");
	sh_cmt_clock_event_start(ch, periodic);
	return 0;
}

static int sh_cmt_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_cmt_clock_event_set_state(ced, 0);
}

static int sh_cmt_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_cmt_clock_event_set_state(ced, 1);
}

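/*
 * When called from the interrupt handler (FLAG_IRQCONTEXT set) only
 * next_match_value is updated; the handler reprograms the hardware itself
 * once the event callback returns. Outside interrupt context the channel is
 * reprogrammed immediately under the channel lock.
 */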
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) static int sh_cmt_clock_event_next(unsigned long delta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 				   struct clock_event_device *ced)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	BUG_ON(!clockevent_state_oneshot(ced));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	if (likely(ch->flags & FLAG_IRQCONTEXT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		ch->next_match_value = delta - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		sh_cmt_set_next(ch, delta - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	clk_unprepare(ch->cmt->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	clk_prepare(ch->cmt->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	dev_pm_genpd_resume(&ch->cmt->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 				      const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	struct clock_event_device *ced = &ch->ced;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	irq = platform_get_irq(ch->cmt->pdev, ch->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	ret = request_irq(irq, sh_cmt_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 			  dev_name(&ch->cmt->pdev->dev), ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			ch->index, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	ced->name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	ced->features = CLOCK_EVT_FEAT_PERIODIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	ced->rating = 125;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	ced->cpumask = cpu_possible_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	ced->set_next_event = sh_cmt_clock_event_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	ced->set_state_shutdown = sh_cmt_clock_event_shutdown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	ced->set_state_periodic = sh_cmt_clock_event_set_periodic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	ced->set_state_oneshot = sh_cmt_clock_event_set_oneshot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	ced->suspend = sh_cmt_clock_event_suspend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	ced->resume = sh_cmt_clock_event_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	/* TODO: calculate good shift from rate and counter bit width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	ced->shift = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	ced->mult = div_sc(ch->cmt->rate, NSEC_PER_SEC, ced->shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	ced->max_delta_ticks = ch->max_match_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	ced->min_delta_ticks = 0x1f;
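
	/*
	 * Worked example (editorial note, not part of the upstream driver),
	 * assuming a hypothetical 8.25 MHz counter clock:
	 *
	 *   mult = div_sc(rate, NSEC_PER_SEC, 32)
	 *        = (8250000 << 32) / 1000000000 ~= 35433480
	 *
	 * clockevent_delta2ns() then maps ticks back to nanoseconds as roughly
	 * (delta << shift) / mult, i.e. delta * NSEC_PER_SEC / rate, so a
	 * 32-bit max_match_value of 0xffffffff gives about 520 seconds of
	 * range and min_delta_ticks = 0x1f about 3.8 us.
	 */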
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		 ch->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	clockevents_register_device(ced);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 			   bool clockevent, bool clocksource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	if (clockevent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		ch->cmt->has_clockevent = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		ret = sh_cmt_register_clockevent(ch, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (clocksource) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		ch->cmt->has_clocksource = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		sh_cmt_register_clocksource(ch, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 				unsigned int hwidx, bool clockevent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 				bool clocksource, struct sh_cmt_device *cmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	/* Skip unused channels. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	if (!clockevent && !clocksource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	ch->cmt = cmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	ch->index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	ch->hwidx = hwidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	ch->timer_bit = hwidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	 * Compute the address of the channel control register block. For the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	 * timers with a per-channel start/stop register, compute its address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	 * as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	switch (cmt->info->model) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	case SH_CMT_16BIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	case SH_CMT_32BIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	case SH_CMT_48BIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	case SH_CMT0_RCAR_GEN2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	case SH_CMT1_RCAR_GEN2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		ch->ioctrl = ch->iostart + 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		ch->timer_bit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	}
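
	/*
	 * Illustrative examples (editorial note, the index values are
	 * hypothetical): on a 48-bit CMT a channel with hwidx == 5 ends up
	 * with ioctrl = mapbase + 0x10 + 5 * 0x10 = mapbase + 0x60, while an
	 * R-Car Gen2 channel with hwidx == 1 gets iostart = mapbase + 0x100
	 * and ioctrl = mapbase + 0x110, with timer_bit forced to 0 because the
	 * start/stop register is per-channel on those SoCs.
	 */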
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		ch->max_match_value = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		ch->max_match_value = (1 << cmt->info->width) - 1;
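
	/*
	 * Editorial note: the special case above presumably avoids shifting 1
	 * by the full bit width of max_match_value, which would be undefined
	 * behaviour; e.g. the 16-bit variant gets max_match_value = 0xffff,
	 * while a counter as wide as the field itself gets ~0 (all bits set).
	 */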
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	ch->match_value = ch->max_match_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	raw_spin_lock_init(&ch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 			      clockevent, clocksource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 			ch->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	ch->cs_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	struct resource *mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	if (!mem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	cmt->mapbase = ioremap(mem->start, resource_size(mem));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	if (cmt->mapbase == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) static const struct platform_device_id sh_cmt_id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	{ "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	{ "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
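
/*
 * Editorial note: a non-DT board would bind one of the IDs above by
 * registering a platform device named e.g. "sh-cmt-32" whose platform data is
 * a struct sh_timer_config carrying channels_mask (consumed in sh_cmt_setup()
 * below); the id's driver_data pointer selects the matching sh_cmt_info entry.
 */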
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		/* deprecated, preserved for backward compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		.compatible = "renesas,cmt-48",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		.data = &sh_cmt_info[SH_CMT_48BIT]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		/* deprecated, preserved for backward compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		.compatible = "renesas,cmt-48-gen2",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		.compatible = "renesas,r8a7740-cmt1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		.data = &sh_cmt_info[SH_CMT_48BIT]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		.compatible = "renesas,sh73a0-cmt1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		.data = &sh_cmt_info[SH_CMT_48BIT]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		.compatible = "renesas,rcar-gen2-cmt0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		.compatible = "renesas,rcar-gen2-cmt1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		.compatible = "renesas,rcar-gen3-cmt0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		.compatible = "renesas,rcar-gen3-cmt1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
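
/*
 * Editorial note, a minimal sketch of a DT node this table would match
 * (all property values other than "compatible" and "fck" are hypothetical
 * placeholders; see the renesas,cmt binding for the real requirements):
 *
 *	cmt0: timer@... {
 *		compatible = "renesas,rcar-gen2-cmt0";
 *		reg = <...>;
 *		interrupts = <...>, <...>;
 *		clocks = <...>;
 *		clock-names = "fck";
 *	};
 *
 * of_device_get_match_data() in sh_cmt_setup() then hands back the
 * corresponding sh_cmt_info entry.
 */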
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	unsigned int mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	cmt->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	raw_spin_lock_init(&cmt->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		cmt->info = of_device_get_match_data(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		cmt->hw_channels = cmt->info->channels_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	} else if (pdev->dev.platform_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		struct sh_timer_config *cfg = pdev->dev.platform_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		const struct platform_device_id *id = pdev->id_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		cmt->info = (const struct sh_cmt_info *)id->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		cmt->hw_channels = cfg->channels_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		dev_err(&cmt->pdev->dev, "missing platform data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	/* Get hold of clock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	cmt->clk = clk_get(&cmt->pdev->dev, "fck");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	if (IS_ERR(cmt->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		dev_err(&cmt->pdev->dev, "cannot get clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		return PTR_ERR(cmt->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	ret = clk_prepare(cmt->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		goto err_clk_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	/* Determine clock rate. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	ret = clk_enable(cmt->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		goto err_clk_unprepare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	if (cmt->info->width == 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		cmt->rate = clk_get_rate(cmt->clk) / 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		cmt->rate = clk_get_rate(cmt->clk) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	clk_disable(cmt->clk);
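
	/*
	 * Editorial note with a hypothetical number: a 66 MHz "fck" would give
	 * a 16-bit CMT a count rate of 66 MHz / 512 ~= 128.9 kHz and the wider
	 * variants 66 MHz / 8 = 8.25 MHz (the value used in the clockevent
	 * example earlier in this file).
	 */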
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	/* Map the memory resource(s). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	ret = sh_cmt_map_memory(cmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		goto err_clk_unprepare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	/* Allocate and setup the channels. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	cmt->num_channels = hweight8(cmt->hw_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	cmt->channels = kcalloc(cmt->num_channels, sizeof(*cmt->channels),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 				GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	if (cmt->channels == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		goto err_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	 * Use the first channel as a clock event device and the second channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	 * as a clock source. If only one channel is available use it for both.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		unsigned int hwidx = ffs(mask) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		bool clocksource = i == 1 || cmt->num_channels == 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		bool clockevent = i == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 					   clockevent, clocksource, cmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 			goto err_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		mask &= ~(1 << hwidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	}
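
	/*
	 * Editorial walk-through of the loop above with a hypothetical
	 * channels_mask of 0x60: hweight8(0x60) = 2 channels; iteration 0
	 * picks hwidx = ffs(0x60) - 1 = 5 as the clock event device and clears
	 * bit 5, then iteration 1 picks hwidx = 6 as the clock source.
	 */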
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	platform_set_drvdata(pdev, cmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) err_unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	kfree(cmt->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	iounmap(cmt->mapbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) err_clk_unprepare:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	clk_unprepare(cmt->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) err_clk_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	clk_put(cmt->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static int sh_cmt_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	if (!is_sh_early_platform_device(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		pm_runtime_set_active(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	if (cmt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		dev_info(&pdev->dev, "kept as earlytimer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	if (cmt == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	ret = sh_cmt_setup(cmt, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		kfree(cmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		pm_runtime_idle(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	if (is_sh_early_platform_device(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	if (cmt->has_clockevent || cmt->has_clocksource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		pm_runtime_irq_safe(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		pm_runtime_idle(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) static int sh_cmt_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	return -EBUSY; /* cannot unregister clockevent and clocksource */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) static struct platform_driver sh_cmt_device_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	.probe		= sh_cmt_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	.remove		= sh_cmt_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	.driver		= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		.name	= "sh_cmt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		.of_match_table = of_match_ptr(sh_cmt_of_table),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	.id_table	= sh_cmt_id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) static int __init sh_cmt_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	return platform_driver_register(&sh_cmt_device_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) static void __exit sh_cmt_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	platform_driver_unregister(&sh_cmt_device_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) #ifdef CONFIG_SUPERH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) sh_early_platform_init("earlytimer", &sh_cmt_device_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) subsys_initcall(sh_cmt_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) module_exit(sh_cmt_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) MODULE_AUTHOR("Magnus Damm");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) MODULE_DESCRIPTION("SuperH CMT Timer Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) MODULE_LICENSE("GPL v2");