// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/alpha/kernel/time.c
 *
 * Copyright (C) 1991, 1992, 1995, 1999, 2000  Linus Torvalds
 *
 * This file contains the clocksource time handling.
 * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
 *		"A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1997-01-09	Adrian Sun
 *		use interval timer if CONFIG_RTC=y
 * 1997-10-29	John Bowman (bowman@math.ualberta.ca)
 *		fixed tick loss calculation in timer_interrupt
 *		(round system clock to nearest tick instead of truncating)
 *		fixed algorithm in time_init for getting time from CMOS clock
 * 1999-04-16	Thorsten Kranzkowski (dl8bcu@gmx.net)
 *		fixed algorithm in do_gettimeofday() for calculating the
 *		precise time from processor cycle counter (now taking
 *		lost_ticks into account)
 * 2003-06-03	R. Scott Bailey <scott.bailey@eds.com>
 *		Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bcd.h>
#include <linux/profile.h>
#include <linux/irq_work.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/hwrpb.h>

#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>

#include "proto.h"
#include "irq_impl.h"

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);

unsigned long est_cycle_freq;

#ifdef CONFIG_IRQ_WORK

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)

void arch_irq_work_raise(void)
{
	set_irq_work_pending_flag();
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()		0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */
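
/*
 * Note that arch_irq_work_raise() above only sets a per-cpu flag; there is
 * no self-interrupt raised here.  Pending irq_work is therefore noticed and
 * run at the next timer tick (see rtc_timer_interrupt() below).
 */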

static inline __u32 rpcc(void)
{
	return __builtin_alpha_rpcc();
}
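
/*
 * A note on the primitive above: the builtin compiles to the Alpha "rpcc"
 * instruction (roughly "rpcc %0"), which returns a 64-bit value whose low
 * 32 bits are the free-running process cycle counter; the high half holds
 * an OS/PALcode-maintained offset.  Only the low half is usable as a raw
 * counter, hence the __u32 return type here and the 32-bit mask on
 * clocksource_rpcc below.
 */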


/*
 * The RTC as a clock_event_device primitive.
 */

static DEFINE_PER_CPU(struct clock_event_device, cpu_ce);

irqreturn_t
rtc_timer_interrupt(int irq, void *dev)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

	/* Don't run the hook for UNUSED or SHUTDOWN.  */
	if (likely(clockevent_state_periodic(ce)))
		ce->event_handler(ce);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	return IRQ_HANDLED;
}

static int
rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
{
	/* This hook is for oneshot mode, which we don't support.  */
	return -EINVAL;
}

static void __init
init_rtc_clockevent(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

	*ce = (struct clock_event_device){
		.name = "rtc",
		.features = CLOCK_EVT_FEAT_PERIODIC,
		.rating = 100,
		.cpumask = cpumask_of(cpu),
		.set_next_event = rtc_ce_set_next_event,
	};

	clockevents_config_and_register(ce, CONFIG_HZ, 0, 0);
}
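
/*
 * Registration note (an observation, not new behaviour): since the device
 * advertises only CLOCK_EVT_FEAT_PERIODIC, the min/max delta arguments
 * (0, 0) passed to clockevents_config_and_register() above are unused, and
 * the core will never program a one-shot event; rtc_ce_set_next_event()
 * exists purely to satisfy the interface.
 */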


/*
 * The QEMU clock as a clocksource primitive.
 */

static u64
qemu_cs_read(struct clocksource *cs)
{
	return qemu_get_vmtime();
}

static struct clocksource qemu_cs = {
	.name		= "qemu",
	.rating		= 400,
	.read		= qemu_cs_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.max_idle_ns	= LONG_MAX
};
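
/*
 * For orientation: time_init() registers qemu_cs with
 * clocksource_register_hz(&qemu_cs, NSEC_PER_SEC), i.e. at 1 GHz, so one
 * counter unit corresponds to one nanosecond, matching the assumption that
 * qemu_get_vmtime() reports guest time in nanoseconds.  With a full 64-bit
 * mask the counter cannot wrap in any realistic uptime, which is why
 * max_idle_ns can safely be LONG_MAX.
 */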


/*
 * The QEMU alarm as a clock_event_device primitive.
 */

static int qemu_ce_shutdown(struct clock_event_device *ce)
{
	/* The mode member of CE is updated for us in generic code.
	   Just make sure that the event is disabled.  */
	qemu_set_alarm_abs(0);
	return 0;
}

static int
qemu_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
{
	qemu_set_alarm_rel(evt);
	return 0;
}

static irqreturn_t
qemu_timer_interrupt(int irq, void *dev)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

	ce->event_handler(ce);
	return IRQ_HANDLED;
}

static void __init
init_qemu_clockevent(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

	*ce = (struct clock_event_device){
		.name = "qemu",
		.features = CLOCK_EVT_FEAT_ONESHOT,
		.rating = 400,
		.cpumask = cpumask_of(cpu),
		.set_state_shutdown = qemu_ce_shutdown,
		.set_state_oneshot = qemu_ce_shutdown,
		.tick_resume = qemu_ce_shutdown,
		.set_next_event = qemu_ce_set_next_event,
	};

	clockevents_config_and_register(ce, NSEC_PER_SEC, 1000, LONG_MAX);
}
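
/*
 * As with qemu_cs, the device is configured at NSEC_PER_SEC, so the "evt"
 * delta handed to qemu_ce_set_next_event() is already in nanoseconds; the
 * registration above bounds the programmable range to [1000 ns, LONG_MAX ns].
 */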


void __init
common_init_rtc(void)
{
	unsigned char x, sel = 0;

	/* Reset periodic interrupt frequency.  */
#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
	x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
	/* Test includes known working values on various platforms
	   where 0x26 is wrong; we refuse to change those.  */
	if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
		sel = RTC_REF_CLCK_32KHZ + 6;
	}
#elif CONFIG_HZ == 256 || CONFIG_HZ == 128 || CONFIG_HZ == 64 || CONFIG_HZ == 32
	sel = RTC_REF_CLCK_32KHZ + __builtin_ffs(32768 / CONFIG_HZ);
#else
# error "Unknown HZ from arch/alpha/Kconfig"
#endif
	if (sel) {
		printk(KERN_INFO "Setting RTC_FREQ to %d Hz (%x)\n",
		       CONFIG_HZ, sel);
		CMOS_WRITE(sel, RTC_FREQ_SELECT);
	}

	/* Turn on periodic interrupts.  */
	x = CMOS_READ(RTC_CONTROL);
	if (!(x & RTC_PIE)) {
		printk("Turning on RTC interrupts.\n");
		x |= RTC_PIE;
		x &= ~(RTC_AIE | RTC_UIE);
		CMOS_WRITE(x, RTC_CONTROL);
	}
	(void) CMOS_READ(RTC_INTR_FLAGS);

	outb(0x36, 0x43);	/* pit counter 0: system timer */
	outb(0x00, 0x40);
	outb(0x00, 0x40);

	outb(0xb6, 0x43);	/* pit counter 2: speaker */
	outb(0x31, 0x42);
	outb(0x13, 0x42);
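
	/*
	 * For reference (derived from the constants above, not new tuning):
	 * the 8254 input clock is PIT_TICK_RATE = 1193182 Hz.  Counter 0 is
	 * loaded LSB/MSB with 0x0000, i.e. a divisor of 65536, giving
	 * 1193182 / 65536 ~= 18.2 Hz; counter 2 gets 0x1331 = 4913, i.e. a
	 * ~243 Hz tone should the speaker ever be ungated.
	 */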

	init_rtc_irq(NULL);
}


#ifndef CONFIG_ALPHA_WTINT
/*
 * The RPCC as a clocksource primitive.
 *
 * While we have free-running timecounters running on all CPUs, and we make
 * a half-hearted attempt in init_rtc_rpcc_info to sync the timecounter
 * with the wall clock, that initialization isn't kept up-to-date across
 * different time counters in SMP mode.  Therefore we can only use this
 * method when there's only one CPU enabled.
 *
 * When using the WTINT PALcall, the RPCC may shift to a lower frequency,
 * or stop altogether, while waiting for the interrupt.  Therefore we cannot
 * use this method when WTINT is in use.
 */

static u64 read_rpcc(struct clocksource *cs)
{
	return rpcc();
}

static struct clocksource clocksource_rpcc = {
	.name	= "rpcc",
	.rating	= 300,
	.read	= read_rpcc,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS
};
#endif /* CONFIG_ALPHA_WTINT */


/* Validate a computed cycle counter result against the known bounds for
   the given processor core.  There's too much brokenness in the way of
   timing hardware for any one method to work everywhere.  :-(

   Return 0 if the result cannot be trusted, otherwise return the argument.  */

static unsigned long __init
validate_cc_value(unsigned long cc)
{
	static struct bounds {
		unsigned int min, max;
	} cpu_hz[] __initdata = {
		[EV3_CPU]    = {   50000000,  200000000 },	/* guess */
		[EV4_CPU]    = {  100000000,  300000000 },
		[LCA4_CPU]   = {  100000000,  300000000 },	/* guess */
		[EV45_CPU]   = {  200000000,  300000000 },
		[EV5_CPU]    = {  250000000,  433000000 },
		[EV56_CPU]   = {  333000000,  667000000 },
		[PCA56_CPU]  = {  400000000,  600000000 },	/* guess */
		[PCA57_CPU]  = {  500000000,  600000000 },	/* guess */
		[EV6_CPU]    = {  466000000,  600000000 },
		[EV67_CPU]   = {  600000000,  750000000 },
		[EV68AL_CPU] = {  750000000,  940000000 },
		[EV68CB_CPU] = { 1000000000, 1333333333 },
		/* None of the following are shipping as of 2001-11-01.  */
		[EV68CX_CPU] = { 1000000000, 1700000000 },	/* guess */
		[EV69_CPU]   = { 1000000000, 1700000000 },	/* guess */
		[EV7_CPU]    = {  800000000, 1400000000 },	/* guess */
		[EV79_CPU]   = { 1000000000, 2000000000 },	/* guess */
	};

	/* Allow for some drift in the crystal.  10 MHz is more than enough.  */
	const unsigned int deviation = 10000000;

	struct percpu_struct *cpu;
	unsigned int index;

	cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
	index = cpu->type & 0xffffffff;

	/* If index out of bounds, no way to validate.  */
	if (index >= ARRAY_SIZE(cpu_hz))
		return cc;

	/* If index contains no data, no way to validate.  */
	if (cpu_hz[index].max == 0)
		return cc;

	if (cc < cpu_hz[index].min - deviation
	    || cc > cpu_hz[index].max + deviation)
		return 0;

	return cc;
}


/*
 * Calibrate CPU clock using legacy 8254 timer/counter.  Stolen from
 * arch/i386/time.c.
 */

#define CALIBRATE_LATCH	0xffff
#define TIMEOUT_COUNT	0x100000

static unsigned long __init
calibrate_cc_with_pit(void)
{
	int cc, count = 0;

	/* Set the Gate high, disable speaker.  */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Now let's take care of CTC channel 2:
	 *
	 * Set the Gate high, program CTC channel 2 for mode 0
	 * (interrupt on terminal count mode), binary count, and
	 * load the CALIBRATE_LATCH count (LSB and MSB) to begin
	 * the countdown.
	 */
	outb(0xb0, 0x43);			/* binary, mode 0, LSB/MSB, Ch 2 */
	outb(CALIBRATE_LATCH & 0xff, 0x42);	/* LSB of count */
	outb(CALIBRATE_LATCH >> 8, 0x42);	/* MSB of count */

	cc = rpcc();
	do {
		count++;
	} while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT);
	cc = rpcc() - cc;

	/* Error: ECTCNEVERSET or ECPUTOOFAST.  */
	if (count <= 1 || count == TIMEOUT_COUNT)
		return 0;

	return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1);
}
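
/*
 * A back-of-the-envelope check on the conversion above (illustrative
 * numbers, not measured data): the countdown lasts CALIBRATE_LATCH + 1 =
 * 65536 PIT ticks, i.e. 65536 / 1193182 ~= 54.9 ms.  A 500 MHz CPU would
 * accumulate roughly 27.5 million cycles in that window, and
 *
 *	27500000 * 1193182 / 65536 ~= 500000000
 *
 * recovers the cycle frequency in Hz.
 */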

/* The Linux interpretation of the CMOS clock register contents:
   When the Update-In-Progress (UIP) flag goes from 1 to 0, the
   RTC registers show the second which has precisely just started.
   Let's hope other operating systems interpret the RTC the same way.  */

static unsigned long __init
rpcc_after_update_in_progress(void)
{
	do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP));
	do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);

	return rpcc();
}
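
/*
 * Calibration attempt #2 in time_init() below leans on this: two
 * back-to-back calls return cycle counter samples taken at consecutive
 * RTC second boundaries, nominally one second apart, so their 32-bit
 * difference is itself an estimate of the cycle frequency in Hz.
 */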

void __init
time_init(void)
{
	unsigned int cc1, cc2;
	unsigned long cycle_freq, tolerance;
	long diff;

	if (alpha_using_qemu) {
		clocksource_register_hz(&qemu_cs, NSEC_PER_SEC);
		init_qemu_clockevent();
		init_rtc_irq(qemu_timer_interrupt);
		return;
	}

	/* Calibrate CPU clock -- attempt #1.  */
	if (!est_cycle_freq)
		est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());

	cc1 = rpcc();

	/* Calibrate CPU clock -- attempt #2.  */
	if (!est_cycle_freq) {
		cc1 = rpcc_after_update_in_progress();
		cc2 = rpcc_after_update_in_progress();
		est_cycle_freq = validate_cc_value(cc2 - cc1);
		cc1 = cc2;
	}

	cycle_freq = hwrpb->cycle_freq;
	if (est_cycle_freq) {
		/* If the given value is within 250 PPM of what we calculated,
		   accept it.  Otherwise, use what we found.  */
		tolerance = cycle_freq / 4000;	/* 250 PPM == 1/4000 */
		diff = cycle_freq - est_cycle_freq;
		if (diff < 0)
			diff = -diff;
		if ((unsigned long)diff > tolerance) {
			cycle_freq = est_cycle_freq;
			printk("HWRPB cycle frequency bogus.  "
			       "Estimated %lu Hz\n", cycle_freq);
		} else {
			est_cycle_freq = 0;
		}
	} else if (!validate_cc_value(cycle_freq)) {
		printk("HWRPB cycle frequency bogus, "
		       "and unable to estimate a proper value!\n");
	}

	/* See above for restrictions on using clocksource_rpcc.  */
#ifndef CONFIG_ALPHA_WTINT
	if (hwrpb->nr_processors == 1)
		clocksource_register_hz(&clocksource_rpcc, cycle_freq);
#endif

	/* Startup the timer source.  */
	alpha_mv.init_rtc();
	init_rtc_clockevent();
}
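
/*
 * Worked example of the 250 PPM gate above (illustrative only): with a
 * HWRPB-reported cycle_freq of 500000000 Hz, tolerance is 500000000 / 4000
 * = 125000 Hz, so a PIT/RTC estimate outside 500 MHz +/- 125 kHz causes
 * the HWRPB value to be discarded in favour of the estimate.
 */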

/* Initialize the clock_event_device for secondary cpus.  */
#ifdef CONFIG_SMP
void __init
init_clockevent(void)
{
	if (alpha_using_qemu)
		init_qemu_clockevent();
	else
		init_rtc_clockevent();
}
#endif