Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *  linux/drivers/clocksource/arm_arch_timer.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *  Copyright (C) 2011 ARM Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *  All Rights Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #define pr_fmt(fmt) 	"arch_timer: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/cpu_pm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/clockchips.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/clocksource.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/sched/clock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/sched_clock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/acpi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <asm/arch_timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <asm/virt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <clocksource/arm_arch_timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) 
/*
 * Memory-mapped timer: CNTCTLBase frame registers (ARM Generic Timer
 * memory-mapped interface).
 */
#define CNTTIDR		0x08	/* Counter-timer ID register */
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))	/* frame <n> has virtual capability */

#define CNTACR(n)	(0x40 + ((n) * 4))	/* per-frame access control register */
#define CNTACR_RPCT	BIT(0)	/* allow reads of the physical counter */
#define CNTACR_RVCT	BIT(1)	/* allow reads of the virtual counter */
#define CNTACR_RFRQ	BIT(2)	/* allow reads of the frequency register */
#define CNTACR_RVOFF	BIT(3)	/* allow reads of the virtual offset */
#define CNTACR_RWVT	BIT(4)	/* allow read/write of the virtual timer */
#define CNTACR_RWPT	BIT(5)	/* allow read/write of the physical timer */

/* CNTBase per-frame register offsets */
#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 
/* Bitmask of timer types discovered during early probe (__initdata: freed after boot). */
static unsigned arch_timers_present __initdata;

/* MMIO base of the counter frame (see CNTVCT_LO/HI) when a memory-mapped counter is in use. */
static void __iomem *arch_counter_base;

/* One memory-mapped timer frame: its MMIO base plus the clockevent built on top of it. */
struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

/* Recover the containing struct arch_timer from its embedded clock_event_device. */
#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;	/* counter frequency in Hz (zero until initialised) */
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];	/* PPI number per timer kind */

/* Per-CPU clockevent for the sysreg (CP15) timer. */
static struct clock_event_device __percpu *arch_timer_evt;

/* Which PPI drives the per-CPU clockevents; virtual timer by default. */
static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop;		/* NOTE(review): presumably "timer stops in deep idle" — confirm at setup */
static bool arch_timer_mem_use_virtual;	/* use the virtual view of the MMIO timer frame */
static bool arch_counter_suspend_stop;	/* NOTE(review): counter appears to stop over suspend when set — verify */
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
#else
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE;	/* no generic vDSO support */
#endif /* CONFIG_GENERIC_GETTIMEOFDAY */

/* CPUs whose event stream is currently usable. */
static cpumask_t evtstrm_available = CPU_MASK_NONE;
/* Event-stream enable: compile-time default, overridable by the early param below. */
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 
/*
 * "clocksource.arm_arch_timer.evtstrm=" command-line override: parse a
 * boolean into evtstrm_enable. Returns strtobool()'s result (0 on success,
 * -EINVAL on unparseable input).
 */
static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88)  * Architected system timer support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) static __always_inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 			  struct clock_event_device *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 		struct arch_timer *timer = to_arch_timer(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 		switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 		case ARCH_TIMER_REG_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 			writel_relaxed(val, timer->base + CNTP_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 		case ARCH_TIMER_REG_TVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 			writel_relaxed(val, timer->base + CNTP_TVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 		struct arch_timer *timer = to_arch_timer(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 		switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 		case ARCH_TIMER_REG_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 			writel_relaxed(val, timer->base + CNTV_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 		case ARCH_TIMER_REG_TVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 			writel_relaxed(val, timer->base + CNTV_TVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 		arch_timer_reg_write_cp15(access, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) static __always_inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 			struct clock_event_device *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 		struct arch_timer *timer = to_arch_timer(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 		switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 		case ARCH_TIMER_REG_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 			val = readl_relaxed(timer->base + CNTP_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 		case ARCH_TIMER_REG_TVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 			val = readl_relaxed(timer->base + CNTP_TVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 		struct arch_timer *timer = to_arch_timer(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 		switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 		case ARCH_TIMER_REG_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 			val = readl_relaxed(timer->base + CNTV_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 		case ARCH_TIMER_REG_TVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 			val = readl_relaxed(timer->base + CNTV_TVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 		val = arch_timer_reg_read_cp15(access, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 
/*
 * Addressable wrappers around the __arch_counter_* accessors from
 * <asm/arch_timer.h>, so they can be assigned to function pointers
 * (arch_timer_read_counter, clocksource hooks). The *_stable variants are
 * the erratum-tolerant versions — see the OOL workarounds further down.
 * notrace: these back sched_clock(), so they must not be ftraced.
 */
static notrace u64 arch_counter_get_cntpct_stable(void)
{
	return __arch_counter_get_cntpct_stable();
}

static notrace u64 arch_counter_get_cntpct(void)
{
	return __arch_counter_get_cntpct();
}

static notrace u64 arch_counter_get_cntvct_stable(void)
{
	return __arch_counter_get_cntvct_stable();
}

static notrace u64 arch_counter_get_cntvct(void)
{
	return __arch_counter_get_cntvct();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174)  * Default to cp15 based access because arm64 uses this function for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175)  * sched_clock() before DT is probed and the cp15 method is guaranteed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176)  * to exist on arm64. arm doesn't use this before DT is probed so even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177)  * if we don't have the cp15 accessors we won't have a problem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) EXPORT_SYMBOL_GPL(arch_timer_read_counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 
/* clocksource ->read hook; @cs is unused, dispatch via the global pointer. */
static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

/* cyclecounter ->read hook (timecounter users); same dispatch. */
static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 
/*
 * The architected counter as a clocksource. The 56-bit mask reflects the
 * minimum counter width the code relies on (full width may be larger).
 */
static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,	/* high rating: preferred over most SoC timers */
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Same counter exposed as a cyclecounter (e.g. for timecounter consumers). */
static struct cyclecounter cyclecounter __ro_after_init = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 
/*
 * ACPI OEM identification used to match erratum workarounds against a
 * platform's OEM table fields; +1 on each ID for NUL termination.
 */
struct ate_acpi_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) #ifdef CONFIG_FSL_ERRATUM_A008585
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213)  * The number of retries is an arbitrary value well beyond the highest number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214)  * of iterations the loop has been observed to take.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215)  */
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

/* Erratum A-008585 accessors: re-read until two back-to-back reads agree. */
static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
	return __fsl_a008585_read_reg(cntpct_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) #ifdef CONFIG_HISILICON_ERRATUM_161010101
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253)  * Verify whether the value of the second read is larger than the first by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254)  * less than 32 is the only way to confirm the value is correct, so clear the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255)  * lower 5 bits to check whether the difference is greater than 32 or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256)  * Theoretically the erratum should not occur more than twice in succession
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257)  * when reading the system counter, but it is possible that some interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258)  * may lead to more than twice read errors, triggering the warning, so setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259)  * the number of retries far beyond the number of iterations the loop has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260)  * observed to take.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261)  */
#define __hisi_161010101_read_reg(reg) ({				\
	u64 _old, _new;						\
	int _retries = 50;					\
								\
	do {							\
		_old = read_sysreg(reg);			\
		_new = read_sysreg(reg);			\
		_retries--;					\
	} while (unlikely((_new - _old) >> 5) && _retries);	\
								\
	WARN_ON_ONCE(!_retries);				\
	_new;							\
})

/* Erratum 161010101 accessors: re-read until two reads are within 32 ticks. */
static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
	return __hisi_161010101_read_reg(cntpct_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 
/* ACPI platforms (matched by OEM table info) affected by erratum 161010101. */
static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
	/*
	 * Note that trailing spaces are required to properly match
	 * the OEM table information.
	 */
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP05   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP06   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	},
	{ /* Sentinel indicating the end of the OEM array */ },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) #ifdef CONFIG_ARM64_ERRATUM_858921
/*
 * Erratum 858921 (Cortex-A73) workaround: read the counter twice; if bit 32
 * differs between the two reads, a roll-over of the low word happened in
 * between and the first read is the one to trust — otherwise return the
 * later read. (Selection logic per the erratum notice; see ARM's
 * Cortex-A73 errata documentation.)
 */
static u64 notrace arm64_858921_read_cntpct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntpct_el0);
	new = read_sysreg(cntpct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}

static u64 notrace arm64_858921_read_cntvct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntvct_el0);
	new = read_sysreg(cntvct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) #ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342)  * The low bits of the counter registers are indeterminate while bit 10 or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343)  * greater is rolling over. Since the counter value can jump both backward
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344)  * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345)  * with all ones or all zeros in the low bits. Bound the loop by the maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346)  * number of CPU cycles in 3 consecutive 24 MHz counter periods.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347)  */
#define __sun50i_a64_read_reg(reg) ({					\
	u64 _val;							\
	int _retries = 150;						\
									\
	do {								\
		_val = read_sysreg(reg);				\
		_retries--;						\
	} while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries);	\
									\
	WARN_ON_ONCE(!_retries);					\
	_val;								\
})

static u64 notrace sun50i_a64_read_cntpct_el0(void)
{
	return __sun50i_a64_read_reg(cntpct_el0);
}

static u64 notrace sun50i_a64_read_cntvct_el0(void)
{
	return __sun50i_a64_read_reg(cntvct_el0);
}

/* TVAL is unreliable here: emulate it as CVAL minus the (workaround-read)
 * counter, truncated to 32 bits. */
static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
{
	return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
}

static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
{
	return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
/* Per-CPU pointer to the erratum workaround active on that CPU (NULL if none). */
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

/* Non-zero once any CPU has an OOL workaround in use — TODO confirm where set. */
static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 						struct clock_event_device *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	unsigned long ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	u64 cval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 	ctrl |= ARCH_TIMER_CTRL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	if (access == ARCH_TIMER_PHYS_ACCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 		cval = evt + arch_counter_get_cntpct_stable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 		write_sysreg(cval, cntp_cval_el0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 		cval = evt + arch_counter_get_cntvct_stable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 		write_sysreg(cval, cntv_cval_el0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 
/* clockevent ->set_next_event hook (virtual timer) for erratum-affected CPUs. */
static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

/* clockevent ->set_next_event hook (physical timer) for erratum-affected CPUs. */
static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) static const struct arch_timer_erratum_workaround ool_workarounds[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) #ifdef CONFIG_FSL_ERRATUM_A008585
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 		.match_type = ate_match_dt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 		.id = "fsl,erratum-a008585",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 		.desc = "Freescale erratum a005858",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 		.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 		.set_next_event_phys = erratum_set_next_event_tval_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 		.set_next_event_virt = erratum_set_next_event_tval_virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) #ifdef CONFIG_HISILICON_ERRATUM_161010101
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 		.match_type = ate_match_dt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 		.id = "hisilicon,erratum-161010101",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 		.desc = "HiSilicon erratum 161010101",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		.set_next_event_phys = erratum_set_next_event_tval_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		.set_next_event_virt = erratum_set_next_event_tval_virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 		.match_type = ate_match_acpi_oem_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 		.id = hisi_161010101_oem_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 		.desc = "HiSilicon erratum 161010101",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 		.set_next_event_phys = erratum_set_next_event_tval_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 		.set_next_event_virt = erratum_set_next_event_tval_virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) #ifdef CONFIG_ARM64_ERRATUM_858921
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 		.match_type = ate_match_local_cap_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 		.id = (void *)ARM64_WORKAROUND_858921,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 		.desc = "ARM erratum 858921",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 		.read_cntpct_el0 = arm64_858921_read_cntpct_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) #ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 		.match_type = ate_match_dt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		.id = "allwinner,erratum-unknown1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		.desc = "Allwinner erratum UNKNOWN1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 		.read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 		.read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 		.read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 		.read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 		.set_next_event_phys = erratum_set_next_event_tval_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 		.set_next_event_virt = erratum_set_next_event_tval_virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) #ifdef CONFIG_ARM64_ERRATUM_1418040
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 		.match_type = ate_match_local_cap_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 		.id = (void *)ARM64_WORKAROUND_1418040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 		.desc = "ARM erratum 1418040",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 		.disable_compat_vdso = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 			       const void *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 				 const void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	const struct device_node *np = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	return of_property_read_bool(np, wa->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 					const void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	return this_cpu_has_cap((uintptr_t)wa->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 				       const void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	static const struct ate_acpi_oem_info empty_oem_info = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	const struct ate_acpi_oem_info *info = wa->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	const struct acpi_table_header *table = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	/* Iterate over the ACPI OEM info array, looking for a match */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	while (memcmp(info, &empty_oem_info, sizeof(*info))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		    !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 		    info->oem_revision == table->oem_revision)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		info++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) static const struct arch_timer_erratum_workaround *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 			  ate_match_fn_t match_fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 			  void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		if (ool_workarounds[i].match_type != type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		if (match_fn(&ool_workarounds[i], arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 			return &ool_workarounds[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 				  bool local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	if (local) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 		__this_cpu_write(timer_unstable_counter_workaround, wa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 		for_each_possible_cpu(i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 			per_cpu(timer_unstable_counter_workaround, i) = wa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 		atomic_set(&timer_unstable_counter_workaround_in_use, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	 * Don't use the vdso fastpath if errata require using the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	 * out-of-line counter accessor. We may change our mind pretty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	 * late in the game (with a per-CPU erratum, for example), so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	 * change both the default value and the vdso itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	if (wa->read_cntvct_el0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 		clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 		vdso_default = VDSO_CLOCKMODE_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	} else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 		vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		clocksource_counter.vdso_clock_mode = vdso_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 					    void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	const struct arch_timer_erratum_workaround *wa, *__wa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	ate_match_fn_t match_fn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	bool local = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	case ate_match_dt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 		match_fn = arch_timer_check_dt_erratum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	case ate_match_local_cap_id:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 		match_fn = arch_timer_check_local_cap_erratum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 		local = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	case ate_match_acpi_oem_info:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 		match_fn = arch_timer_check_acpi_oem_erratum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 		WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	wa = arch_timer_iterate_errata(type, match_fn, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	if (!wa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	__wa = __this_cpu_read(timer_unstable_counter_workaround);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	if (__wa && wa != __wa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		pr_warn("Can't enable workaround for %s (clashes with %s\n)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 			wa->desc, __wa->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	if (__wa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	arch_timer_enable_workaround(wa, local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	pr_info("Enabling %s workaround for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		local ? "local" : "global", wa->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) static bool arch_timer_this_cpu_has_cntvct_wa(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	return has_erratum_handler(read_cntvct_el0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) static bool arch_timer_counter_has_wa(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	return atomic_read(&timer_unstable_counter_workaround_in_use);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) #define arch_timer_check_ool_workaround(t,a)		do { } while(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) #define arch_timer_this_cpu_has_cntvct_wa()		({false;})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) #define arch_timer_counter_has_wa()			({false;})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) #endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) static __always_inline irqreturn_t timer_handler(const int access,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 					struct clock_event_device *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	unsigned long ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 		evt->event_handler(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	struct clock_event_device *evt = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	struct clock_event_device *evt = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	struct clock_event_device *evt = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	struct clock_event_device *evt = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) static __always_inline int timer_shutdown(const int access,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 					  struct clock_event_device *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	unsigned long ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) static int arch_timer_shutdown_virt(struct clock_event_device *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) static int arch_timer_shutdown_phys(struct clock_event_device *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) static __always_inline void set_next_event(const int access, unsigned long evt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 					   struct clock_event_device *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	unsigned long ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	ctrl |= ARCH_TIMER_CTRL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) static int arch_timer_set_next_event_virt(unsigned long evt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 					  struct clock_event_device *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) static int arch_timer_set_next_event_phys(unsigned long evt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 					  struct clock_event_device *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) static int arch_timer_set_next_event_virt_mem(unsigned long evt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 					      struct clock_event_device *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) static int arch_timer_set_next_event_phys_mem(unsigned long evt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 					      struct clock_event_device *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) static void __arch_timer_setup(unsigned type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 			       struct clock_event_device *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	clk->features = CLOCK_EVT_FEAT_ONESHOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	if (type == ARCH_TIMER_TYPE_CP15) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		typeof(clk->set_next_event) sne;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		if (arch_timer_c3stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 			clk->features |= CLOCK_EVT_FEAT_C3STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		clk->name = "arch_sys_timer";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		clk->rating = 450;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		clk->cpumask = cpumask_of(smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		switch (arch_timer_uses_ppi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		case ARCH_TIMER_VIRT_PPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 			clk->set_state_shutdown = arch_timer_shutdown_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 			sne = erratum_handler(set_next_event_virt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		case ARCH_TIMER_PHYS_SECURE_PPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		case ARCH_TIMER_PHYS_NONSECURE_PPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		case ARCH_TIMER_HYP_PPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 			clk->set_state_shutdown = arch_timer_shutdown_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			sne = erratum_handler(set_next_event_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 			BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		clk->set_next_event = sne;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		clk->name = "arch_mem_timer";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		clk->rating = 400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		clk->cpumask = cpu_possible_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		if (arch_timer_mem_use_virtual) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			clk->set_next_event =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 				arch_timer_set_next_event_virt_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			clk->set_next_event =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 				arch_timer_set_next_event_phys_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	clk->set_state_shutdown(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) static void arch_timer_evtstrm_enable(int divider)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	u32 cntkctl = arch_timer_get_cntkctl();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	/* Set the divider and enable virtual event stream */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 			| ARCH_TIMER_VIRT_EVT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	arch_timer_set_cntkctl(cntkctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	arch_timer_set_evtstrm_feature();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) static void arch_timer_configure_evtstream(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	int evt_stream_div, lsb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	 * As the event stream can at most be generated at half the frequency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	 * of the counter, use half the frequency when computing the divider.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	 * Find the closest power of two to the divisor. If the adjacent bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	 * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	lsb = fls(evt_stream_div) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		lsb++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	/* enable event stream */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	arch_timer_evtstrm_enable(max(0, min(lsb, 15)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) static void arch_counter_set_user_access(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	u32 cntkctl = arch_timer_get_cntkctl();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	/* Disable user access to the timers and both counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	/* Also disable virtual event stream */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 			| ARCH_TIMER_USR_VT_ACCESS_EN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		        | ARCH_TIMER_USR_VCT_ACCESS_EN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			| ARCH_TIMER_VIRT_EVT_EN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 			| ARCH_TIMER_USR_PCT_ACCESS_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	 * Enable user access to the virtual counter if it doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	 * need to be workaround. The vdso may have been already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	 * disabled though.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	if (arch_timer_this_cpu_has_cntvct_wa())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	arch_timer_set_cntkctl(cntkctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) static bool arch_timer_has_nonsecure_ppi(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) static u32 check_ppi_trigger(int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	u32 flags = irq_get_trigger_type(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		pr_warn("WARNING: Please fix your firmware\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		flags = IRQF_TRIGGER_LOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	return flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) static int arch_timer_starting_cpu(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	__arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	if (arch_timer_has_nonsecure_ppi()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 				  flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	arch_counter_set_user_access();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	if (evtstrm_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		arch_timer_configure_evtstream();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) static int validate_timer_rate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	if (!arch_timer_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	/* Arch timer frequency < 1MHz can cause trouble */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	WARN_ON(arch_timer_rate < 1000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924)  * For historical reasons, when probing with DT we use whichever (non-zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925)  * rate was probed first, and don't verify that others match. If the first node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926)  * probed has a clock-frequency property, this overrides the HW register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	/* Who has more than one independent system counter? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	if (arch_timer_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		arch_timer_rate = rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	/* Check the timer frequency. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	if (validate_timer_rate())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		pr_warn("frequency not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) static void arch_timer_banner(unsigned type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 			" and " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		(unsigned long)arch_timer_rate / 1000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		(unsigned long)(arch_timer_rate / 10000) % 100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		type & ARCH_TIMER_TYPE_CP15 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			(arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 			"",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		type & ARCH_TIMER_TYPE_MEM ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			arch_timer_mem_use_virtual ? "virt" : "phys" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 			"");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) u32 arch_timer_get_rate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	return arch_timer_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) bool arch_timer_evtstrm_available(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	 * We might get called from a preemptible context. This is fine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	 * because availability of the event stream should be always the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	 * for a preemptible context and context where we might resume a task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) static u64 arch_counter_get_cntvct_mem(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	u32 vct_lo, vct_hi, tmp_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	} while (vct_hi != tmp_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	return ((u64) vct_hi << 32) | vct_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) static struct arch_timer_kvm_info arch_timer_kvm_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	return &arch_timer_kvm_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) static void __init arch_counter_register(unsigned type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	u64 start_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	/* Register the CP15 based counter if we have one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	if (type & ARCH_TIMER_TYPE_CP15) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		u64 (*rd)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			if (arch_timer_counter_has_wa())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 				rd = arch_counter_get_cntvct_stable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 				rd = arch_counter_get_cntvct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 			if (arch_timer_counter_has_wa())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 				rd = arch_counter_get_cntpct_stable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 				rd = arch_counter_get_cntpct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		arch_timer_read_counter = rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		clocksource_counter.vdso_clock_mode = vdso_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		arch_timer_read_counter = arch_counter_get_cntvct_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	if (!arch_counter_suspend_stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	start_count = arch_timer_read_counter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	cyclecounter.mult = clocksource_counter.mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	cyclecounter.shift = clocksource_counter.shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	timecounter_init(&arch_timer_kvm_info.timecounter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			 &cyclecounter, start_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	/* 56 bits minimum, so we assume worst case rollover */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) static void arch_timer_stop(struct clock_event_device *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	if (arch_timer_has_nonsecure_ppi())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	clk->set_state_shutdown(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) static int arch_timer_dying_cpu(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	arch_timer_stop(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) #ifdef CONFIG_CPU_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) static int arch_timer_cpu_pm_notify(struct notifier_block *self,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 				    unsigned long action, void *hcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	if (action == CPU_PM_ENTER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		if (arch_timer_have_evtstrm_feature())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 			cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static struct notifier_block arch_timer_cpu_pm_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	.notifier_call = arch_timer_cpu_pm_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) static int __init arch_timer_cpu_pm_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) static void __init arch_timer_cpu_pm_deinit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static int __init arch_timer_cpu_pm_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) static void __init arch_timer_cpu_pm_deinit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) static int __init arch_timer_register(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	int ppi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	arch_timer_evt = alloc_percpu(struct clock_event_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	if (!arch_timer_evt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	ppi = arch_timer_ppi[arch_timer_uses_ppi];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	switch (arch_timer_uses_ppi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	case ARCH_TIMER_VIRT_PPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		err = request_percpu_irq(ppi, arch_timer_handler_virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 					 "arch_timer", arch_timer_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	case ARCH_TIMER_PHYS_SECURE_PPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	case ARCH_TIMER_PHYS_NONSECURE_PPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		err = request_percpu_irq(ppi, arch_timer_handler_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 					 "arch_timer", arch_timer_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		if (!err && arch_timer_has_nonsecure_ppi()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 			ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 			err = request_percpu_irq(ppi, arch_timer_handler_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 						 "arch_timer", arch_timer_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 				free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 						arch_timer_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	case ARCH_TIMER_HYP_PPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		err = request_percpu_irq(ppi, arch_timer_handler_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 					 "arch_timer", arch_timer_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		pr_err("can't register interrupt %d (%d)\n", ppi, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	err = arch_timer_cpu_pm_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		goto out_unreg_notify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	/* Register and immediately configure the timer on the boot CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 				"clockevents/arm/arch_timer:starting",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 				arch_timer_starting_cpu, arch_timer_dying_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		goto out_unreg_cpupm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) out_unreg_cpupm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	arch_timer_cpu_pm_deinit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) out_unreg_notify:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	if (arch_timer_has_nonsecure_ppi())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 				arch_timer_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	free_percpu(arch_timer_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	irq_handler_t func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	struct arch_timer *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	t = kzalloc(sizeof(*t), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	if (!t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	t->base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	t->evt.irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	__arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	if (arch_timer_mem_use_virtual)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		func = arch_timer_handler_virt_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		func = arch_timer_handler_phys_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		pr_err("Failed to request mem timer irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		kfree(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) static const struct of_device_id arch_timer_of_match[] __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	{ .compatible   = "arm,armv7-timer",    },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	{ .compatible   = "arm,armv8-timer",    },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	{ .compatible   = "arm,armv7-timer-mem", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) static bool __init arch_timer_needs_of_probing(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	struct device_node *dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	bool needs_probing = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	/* We have two timers, and both device-tree nodes are probed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	if ((arch_timers_present & mask) == mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	 * Only one type of timer is probed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	 * check if we have another type of timer node in device-tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		dn = of_find_matching_node(NULL, arch_timer_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	if (dn && of_device_is_available(dn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		needs_probing = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	of_node_put(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	return needs_probing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static int __init arch_timer_common_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	arch_timer_banner(arch_timers_present);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	arch_counter_register(arch_timers_present);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	return arch_timer_arch_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)  * arch_timer_select_ppi() - Select suitable PPI for the current system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)  * If HYP mode is available, we know that the physical timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)  * has been configured to be accessible from PL1. Use it, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)  * that a guest can use the virtual timer instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)  * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)  * accesses to CNTP_*_EL1 registers are silently redirected to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)  * their CNTHP_*_EL2 counterparts, and use a different PPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)  * number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)  * If no interrupt provided for virtual timer, we'll have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)  * stick to the physical timer. It'd better be accessible...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)  * For arm64 we never use the secure interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)  * Return: a suitable PPI type for the current system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	if (is_kernel_in_hyp_mode())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		return ARCH_TIMER_HYP_PPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		return ARCH_TIMER_VIRT_PPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	if (IS_ENABLED(CONFIG_ARM64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		return ARCH_TIMER_PHYS_NONSECURE_PPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	return ARCH_TIMER_PHYS_SECURE_PPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) static void __init arch_timer_populate_kvm_info(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	if (is_kernel_in_hyp_mode())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) static int __init arch_timer_of_init(struct device_node *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	u32 rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		pr_warn("multiple nodes in dt, skipping\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	arch_timers_present |= ARCH_TIMER_TYPE_CP15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	arch_timer_populate_kvm_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	rate = arch_timer_get_cntfrq();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	arch_timer_of_configure_rate(rate, np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	arch_timer_c3stop = !of_property_read_bool(np, "always-on");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	/* Check for globally applicable workarounds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	arch_timer_check_ool_workaround(ate_match_dt, np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	 * If we cannot rely on firmware initializing the timer registers then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	 * we should use the physical timers instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	if (IS_ENABLED(CONFIG_ARM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		arch_timer_uses_ppi = arch_timer_select_ppi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		pr_err("No interrupt available, giving up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	/* On some systems, the counter stops ticking when in suspend. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	arch_counter_suspend_stop = of_property_read_bool(np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 							 "arm,no-tick-in-suspend");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	ret = arch_timer_register();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	if (arch_timer_needs_of_probing())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	return arch_timer_common_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) static u32 __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	u32 rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	base = ioremap(frame->cntbase, frame->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	if (!base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	rate = readl_relaxed(base + CNTFRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	iounmap(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	return rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) static struct arch_timer_mem_frame * __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	struct arch_timer_mem_frame *frame, *best_frame = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	void __iomem *cntctlbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	u32 cnttidr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	if (!cntctlbase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		pr_err("Can't map CNTCTLBase @ %pa\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 			&timer_mem->cntctlbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	 * Try to find a virtual capable frame. Otherwise fall back to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	 * physical capable frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			     CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		frame = &timer_mem->frame[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		if (!frame->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		/* Try enabling everything, and see what sticks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		writel_relaxed(cntacr, cntctlbase + CNTACR(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		cntacr = readl_relaxed(cntctlbase + CNTACR(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		if ((cnttidr & CNTTIDR_VIRT(i)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 			best_frame = frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 			arch_timer_mem_use_virtual = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		best_frame = frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	iounmap(cntctlbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	return best_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	int ret, irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	if (arch_timer_mem_use_virtual)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		irq = frame->virt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		irq = frame->phys_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	if (!irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		pr_err("Frame missing %s irq.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		       arch_timer_mem_use_virtual ? "virt" : "phys");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	if (!request_mem_region(frame->cntbase, frame->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 				"arch_mem_timer"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	base = ioremap(frame->cntbase, frame->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	if (!base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		pr_err("Can't map frame's registers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	ret = arch_timer_mem_register(base, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		iounmap(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	arch_counter_base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	arch_timers_present |= ARCH_TIMER_TYPE_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) static int __init arch_timer_mem_of_init(struct device_node *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	struct arch_timer_mem *timer_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	struct arch_timer_mem_frame *frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	struct device_node *frame_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	u32 rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	if (!timer_mem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	if (of_address_to_resource(np, 0, &res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	timer_mem->cntctlbase = res.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	timer_mem->size = resource_size(&res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	for_each_available_child_of_node(np, frame_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		u32 n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		struct arch_timer_mem_frame *frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		if (of_property_read_u32(frame_node, "frame-number", &n)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 			pr_err(FW_BUG "Missing frame-number.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 			of_node_put(frame_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 			pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 			       ARCH_TIMER_MEM_MAX_FRAMES - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 			of_node_put(frame_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		frame = &timer_mem->frame[n];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		if (frame->valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 			pr_err(FW_BUG "Duplicated frame-number.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 			of_node_put(frame_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		if (of_address_to_resource(frame_node, 0, &res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 			of_node_put(frame_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		frame->cntbase = res.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		frame->size = resource_size(&res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 		frame->virt_irq = irq_of_parse_and_map(frame_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 						       ARCH_TIMER_VIRT_SPI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		frame->phys_irq = irq_of_parse_and_map(frame_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 						       ARCH_TIMER_PHYS_SPI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		frame->valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	frame = arch_timer_mem_find_best_frame(timer_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	if (!frame) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		pr_err("Unable to find a suitable frame in timer @ %pa\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 			&timer_mem->cntctlbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	rate = arch_timer_mem_frame_get_cntfrq(frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	arch_timer_of_configure_rate(rate, np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	ret = arch_timer_mem_frame_register(frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	if (!ret && !arch_timer_needs_of_probing())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		ret = arch_timer_common_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	kfree(timer_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		       arch_timer_mem_of_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) #ifdef CONFIG_ACPI_GTDT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	struct arch_timer_mem_frame *frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	u32 rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		frame = &timer_mem->frame[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		if (!frame->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		rate = arch_timer_mem_frame_get_cntfrq(frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		if (rate == arch_timer_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 			&frame->cntbase,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 			(unsigned long)rate, (unsigned long)arch_timer_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) static int __init arch_timer_mem_acpi_init(int platform_timer_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	struct arch_timer_mem *timers, *timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	struct arch_timer_mem_frame *frame, *best_frame = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	int timer_count, i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	timers = kcalloc(platform_timer_count, sizeof(*timers),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 			    GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	if (!timers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	ret = acpi_arch_timer_mem_init(timers, &timer_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	if (ret || !timer_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	 * While unlikely, it's theoretically possible that none of the frames
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	 * in a timer expose the combination of feature we want.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	for (i = 0; i < timer_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		timer = &timers[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		frame = arch_timer_mem_find_best_frame(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		if (!best_frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 			best_frame = frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		ret = arch_timer_mem_verify_cntfrq(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 			pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		if (!best_frame) /* implies !frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 			 * Only complain about missing suitable frames if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 			 * haven't already found one in a previous iteration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 			pr_err("Unable to find a suitable frame in timer @ %pa\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 				&timer->cntctlbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	if (best_frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		ret = arch_timer_mem_frame_register(best_frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	kfree(timers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) /* Initialize per-processor generic timer and memory-mapped timer(if present) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) static int __init arch_timer_acpi_init(struct acpi_table_header *table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	int ret, platform_timer_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		pr_warn("already initialized, skipping\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	arch_timers_present |= ARCH_TIMER_TYPE_CP15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	ret = acpi_gtdt_init(table, &platform_timer_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	arch_timer_populate_kvm_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	 * When probing via ACPI, we have no mechanism to override the sysreg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	 * CNTFRQ value. This *must* be correct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	arch_timer_rate = arch_timer_get_cntfrq();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	ret = validate_timer_rate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		pr_err(FW_BUG "frequency not available.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	arch_timer_uses_ppi = arch_timer_select_ppi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		pr_err("No interrupt available, giving up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	/* Always-on capability */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	/* Check for globally applicable workarounds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	ret = arch_timer_register();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	if (platform_timer_count &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	    arch_timer_mem_acpi_init(platform_timer_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		pr_err("Failed to initialize memory-mapped timer.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	return arch_timer_common_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) #endif