// SPDX-License-Identifier: GPL-2.0
/*
 * Time of day based timer functions.
 *
 * S390 version
 * Copyright IBM Corp. 1999, 2008
 * Author(s): Hartmut Penner (hp@de.ibm.com),
 *            Martin Schwidefsky (schwidefsky@de.ibm.com),
 *            Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *
 * Derived from "arch/i386/kernel/time.c"
 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
 */

#define KMSG_COMPONENT "time"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/stop_machine.h>
#include <linux/time.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/profile.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/timekeeper_internal.h>
#include <linux/clockchips.h>
#include <linux/gfp.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <vdso/vsyscall.h>
#include <vdso/clocksource.h>
#include <vdso/helpers.h>
#include <asm/facility.h>
#include <asm/delay.h>
#include <asm/div64.h>
#include <asm/vdso.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/vtimer.h>
#include <asm/stp.h>
#include <asm/cio.h>
#include "entry.h"

unsigned char tod_clock_base[16] __aligned(8) = {
	/* Force to data section. */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
EXPORT_SYMBOL_GPL(tod_clock_base);

u64 clock_comparator_max = -1ULL;
EXPORT_SYMBOL_GPL(clock_comparator_max);

static DEFINE_PER_CPU(struct clock_event_device, comparators);

ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
EXPORT_SYMBOL(s390_epoch_delta_notifier);

unsigned char ptff_function_mask[16];

static unsigned long long lpar_offset;
static unsigned long long initial_leap_seconds;
static unsigned long long tod_steering_end;
static long long tod_steering_delta;

/*
 * Get time offsets with PTFF
 */
void __init time_early_init(void)
{
	struct ptff_qto qto;
	struct ptff_qui qui;

	/* Initialize TOD steering parameters */
	tod_steering_end = *(unsigned long long *) &tod_clock_base[1];
	vdso_data->arch_data.tod_steering_end = tod_steering_end;

	if (!test_facility(28))
		return;

	ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);

	/* get LPAR offset */
	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
		lpar_offset = qto.tod_epoch_difference;

	/* get initial leap seconds */
	if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
		initial_leap_seconds = (unsigned long long)
			((long) qui.old_leap * 4096000000L);
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long notrace sched_clock(void)
{
	return tod_to_ns(get_tod_clock_monotonic());
}
NOKPROBE_SYMBOL(sched_clock);

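/*
 * Convert an extended 128-bit TOD clock value to a timespec64.
 * "high" ends up holding whole microseconds, "low" the sub-microsecond
 * remainder; the result is then split into seconds and nanoseconds.
 */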
static void ext_to_timespec64(unsigned char *clk, struct timespec64 *xt)
{
	unsigned long long high, low, rem, sec, nsec;

	/* Split extended TOD clock to micro-seconds and sub-micro-seconds */
	high = (*(unsigned long long *) clk) >> 4;
	low = (*(unsigned long long *)&clk[7]) << 4;
	/* Calculate seconds and nano-seconds */
	sec = high;
	rem = do_div(sec, 1000000);
	nsec = (((low >> 32) + (rem << 32)) * 1000) >> 32;

	xt->tv_sec = sec;
	xt->tv_nsec = nsec;
}

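/*
 * Deferred clock comparator handling: mark the per-CPU comparator as
 * idle (maximum value) and deliver the expired event to the handler of
 * the per-CPU clock_event_device.
 */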
void clock_comparator_work(void)
{
	struct clock_event_device *cd;

	S390_lowcore.clock_comparator = clock_comparator_max;
	cd = this_cpu_ptr(&comparators);
	cd->event_handler(cd);
}

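/*
 * set_next_event callback of the per-CPU clock_event_device: program
 * the clock comparator to fire "delta" TOD clock units from now.
 */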
static int s390_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	S390_lowcore.clock_comparator = get_tod_clock() + delta;
	set_clock_comparator(S390_lowcore.clock_comparator);
	return 0;
}

/*
 * Set up lowcore and control register of the current cpu to
 * enable TOD clock and clock comparator interrupts.
 */
void init_cpu_timer(void)
{
	struct clock_event_device *cd;
	int cpu;

	S390_lowcore.clock_comparator = clock_comparator_max;
	set_clock_comparator(S390_lowcore.clock_comparator);

	cpu = smp_processor_id();
	cd = &per_cpu(comparators, cpu);
	cd->name		= "comparator";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
	cd->mult		= 16777;
	cd->shift		= 12;
	cd->min_delta_ns	= 1;
	cd->min_delta_ticks	= 1;
	cd->max_delta_ns	= LONG_MAX;
	cd->max_delta_ticks	= ULONG_MAX;
	cd->rating		= 400;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= s390_next_event;

	clockevents_register_device(cd);

	/* Enable clock comparator timer interrupt. */
	__ctl_set_bit(0, 11);

	/* Always allow the timing alert external interrupt. */
	__ctl_set_bit(0, 4);
}

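/*
 * External interrupt handler for clock comparator interrupts
 * (code 0x1004). Account the interrupt and, if the comparator has been
 * disarmed (set to clock_comparator_max), re-write it so that no
 * further interrupts are raised.
 */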
static void clock_comparator_interrupt(struct ext_code ext_code,
				       unsigned int param32,
				       unsigned long param64)
{
	inc_irq_stat(IRQEXT_CLK);
	if (S390_lowcore.clock_comparator == clock_comparator_max)
		set_clock_comparator(S390_lowcore.clock_comparator);
}

static void stp_timing_alert(struct stp_irq_parm *);

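/*
 * External interrupt handler for timing alerts (code 0x1406). STP
 * related alerts, indicated by bits 0x00038000 of the interruption
 * parameter, are forwarded to stp_timing_alert().
 */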
static void timing_alert_interrupt(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	inc_irq_stat(IRQEXT_TLA);
	if (param32 & 0x00038000)
		stp_timing_alert((struct stp_irq_parm *) &param32);
}

static void stp_reset(void);

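/*
 * Read the persistent clock: take the current extended TOD clock value,
 * subtract TOD_UNIX_EPOCH plus the initial leap seconds and convert the
 * result to a timespec64.
 */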
void read_persistent_clock64(struct timespec64 *ts)
{
	unsigned char clk[STORE_CLOCK_EXT_SIZE];
	__u64 delta;

	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
	get_tod_clock_ext(clk);
	*(__u64 *) &clk[1] -= delta;
	if (*(__u64 *) &clk[1] > delta)
		clk[0]--;
	ext_to_timespec64(clk, ts);
}

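/*
 * Compute the boot time from tod_clock_base, the extended TOD clock
 * value captured at boot, and return both the current wall clock time
 * and the offset between wall clock time and boot time.
 */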
void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
						 struct timespec64 *boot_offset)
{
	unsigned char clk[STORE_CLOCK_EXT_SIZE];
	struct timespec64 boot_time;
	__u64 delta;

	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
	memcpy(clk, tod_clock_base, STORE_CLOCK_EXT_SIZE);
	*(__u64 *)&clk[1] -= delta;
	if (*(__u64 *)&clk[1] > delta)
		clk[0]--;
	ext_to_timespec64(clk, &boot_time);

	read_persistent_clock64(wall_time);
	*boot_offset = timespec64_sub(*wall_time, boot_time);
}

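/*
 * Clocksource read function. While a TOD steering adjustment is in
 * progress (i.e. the current TOD value is before tod_steering_end),
 * correct the raw value by the not yet consumed part of the steering
 * delta so that the adjustment is applied gradually rather than as a
 * single jump.
 */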
static u64 read_tod_clock(struct clocksource *cs)
{
	unsigned long long now, adj;

	preempt_disable(); /* protect from changes to steering parameters */
	now = get_tod_clock();
	adj = tod_steering_end - now;
	if (unlikely((s64) adj > 0))
		/*
		 * manually steer by 1 cycle every 2^16 cycles. This
		 * corresponds to shifting the tod delta by 15. 1s is
		 * therefore steered in ~9h. The adjust will decrease
		 * over time, until it finally reaches 0.
		 */
		now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
	preempt_enable();
	return now;
}

static struct clocksource clocksource_tod = {
	.name		= "tod",
	.rating		= 400,
	.read		= read_tod_clock,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 1000,
	.shift		= 12,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.vdso_clock_mode = VDSO_CLOCKMODE_TOD,
};

struct clocksource * __init clocksource_default_clock(void)
{
	return &clocksource_tod;
}

/*
 * Initialize the TOD clock and the CPU timer of
 * the boot cpu.
 */
void __init time_init(void)
{
	/* Reset time synchronization interfaces. */
	stp_reset();

	/* request the clock comparator external interrupt */
	if (register_external_irq(EXT_IRQ_CLK_COMP, clock_comparator_interrupt))
		panic("Couldn't request external interrupt 0x1004");

	/* request the timing alert external interrupt */
	if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
		panic("Couldn't request external interrupt 0x1406");

	if (__clocksource_register(&clocksource_tod) != 0)
		panic("Could not register TOD clock source");

	/* Enable TOD clock interrupts on the boot cpu. */
	init_cpu_timer();

	/* Enable cpu timer interrupts on the boot cpu. */
	vtime_init();
}

static DEFINE_PER_CPU(atomic_t, clock_sync_word);
static DEFINE_MUTEX(stp_mutex);
static unsigned long clock_sync_flags;

#define CLOCK_SYNC_HAS_STP		0
#define CLOCK_SYNC_STP			1
#define CLOCK_SYNC_STPINFO_VALID	2

/*
 * The get_clock function for the physical clock. It will get the current
 * TOD clock, subtract the LPAR offset and write the result to *clock.
 * The function returns 0 if the clock is in sync with the external time
 * source, -EOPNOTSUPP if no STP interface is available, -EACCES if STP
 * is present but not used for clock synchronization and -EAGAIN if the
 * clock is not in sync with the external reference.
 */
int get_phys_clock(unsigned long *clock)
{
	atomic_t *sw_ptr;
	unsigned int sw0, sw1;

	sw_ptr = &get_cpu_var(clock_sync_word);
	sw0 = atomic_read(sw_ptr);
	*clock = get_tod_clock() - lpar_offset;
	sw1 = atomic_read(sw_ptr);
	put_cpu_var(clock_sync_word);
	if (sw0 == sw1 && (sw0 & 0x80000000U))
		/* Success: time is in sync. */
		return 0;
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return -EOPNOTSUPP;
	if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
		return -EACCES;
	return -EAGAIN;
}
EXPORT_SYMBOL(get_phys_clock);


/*
 * Make get_phys_clock() return -EAGAIN.
 */
static void disable_sync_clock(void *dummy)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
	/*
	 * Clear the in-sync bit 2^31. All get_phys_clock calls will
	 * fail until the sync bit is turned back on. In addition
	 * increase the "sequence" counter so that a concurrent
	 * get_phys_clock call cannot race with an STP event and the
	 * subsequent recovery.
	 */
	atomic_andnot(0x80000000, sw_ptr);
	atomic_inc(sw_ptr);
}

/*
 * Make get_phys_clock() return 0 again.
 * Needs to be called from a context disabled for preemption.
 */
static void enable_sync_clock(void)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
	atomic_or(0x80000000, sw_ptr);
}

/*
 * Function to check if the clock is in sync.
 */
static inline int check_sync_clock(void)
{
	atomic_t *sw_ptr;
	int rc;

	sw_ptr = &get_cpu_var(clock_sync_word);
	rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
	put_cpu_var(clock_sync_word);
	return rc;
}


/*
 * Apply clock delta to the global data structures.
 * This is called once on the CPU that performed the clock sync.
 */
static void clock_sync_global(unsigned long long delta)
{
	unsigned long now, adj;
	struct ptff_qto qto;

	/* Fixup the monotonic sched clock. */
	*(unsigned long long *) &tod_clock_base[1] += delta;
	if (*(unsigned long long *) &tod_clock_base[1] < delta)
		/* Epoch overflow */
		tod_clock_base[0]++;
	/* Adjust TOD steering parameters. */
	now = get_tod_clock();
	adj = tod_steering_end - now;
	if (unlikely((s64) adj >= 0))
		/* Calculate how much of the old adjustment is left. */
		tod_steering_delta = (tod_steering_delta < 0) ?
			-(adj >> 15) : (adj >> 15);
	tod_steering_delta += delta;
	if ((abs(tod_steering_delta) >> 48) != 0)
		panic("TOD clock sync offset %lli is too large to drift\n",
		      tod_steering_delta);
	tod_steering_end = now + (abs(tod_steering_delta) << 15);
	vdso_data->arch_data.tod_steering_end = tod_steering_end;
	vdso_data->arch_data.tod_steering_delta = tod_steering_delta;

	/* Update LPAR offset. */
	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
		lpar_offset = qto.tod_epoch_difference;
	/* Call the TOD clock change notifier. */
	atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
}

/*
 * Apply clock delta to the per-CPU data structures of this CPU.
 * This is called for each online CPU after the call to clock_sync_global.
 */
static void clock_sync_local(unsigned long long delta)
{
	/* Add the delta to the clock comparator. */
	if (S390_lowcore.clock_comparator != clock_comparator_max) {
		S390_lowcore.clock_comparator += delta;
		set_clock_comparator(S390_lowcore.clock_comparator);
	}
	/* Adjust the last_update_clock time-stamp. */
	S390_lowcore.last_update_clock += delta;
}

/* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq;

static void __init time_init_wq(void)
{
	if (time_sync_wq)
		return;
	time_sync_wq = create_singlethread_workqueue("timesync");
}

struct clock_sync_data {
	atomic_t cpus;
	int in_sync;
	unsigned long long clock_delta;
};

/*
 * Server Time Protocol (STP) code.
 */
static bool stp_online;
static struct stp_sstpi stp_info;
static void *stp_page;

static void stp_work_fn(struct work_struct *work);
static DECLARE_WORK(stp_work, stp_work_fn);
static struct timer_list stp_timer;

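/*
 * Handle the "stp=" early kernel parameter, e.g. "stp=off" or "stp=1",
 * which sets the initial STP online state.
 */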
static int __init early_parse_stp(char *p)
{
	return kstrtobool(p, &stp_online);
}
early_param("stp", early_parse_stp);

/*
 * Reset STP attachment.
 */
static void __init stp_reset(void)
{
	int rc;

	stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
	if (rc == 0)
		set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
	else if (stp_online) {
		pr_warn("The real or virtual hardware system does not provide an STP interface\n");
		free_page((unsigned long) stp_page);
		stp_page = NULL;
		stp_online = false;
	}
}

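/*
 * Timer callback used to re-trigger the STP work, e.g. to retry a
 * failed synchronization or to clear leap second flags later on.
 */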
static void stp_timeout(struct timer_list *unused)
{
	queue_work(time_sync_wq, &stp_work);
}

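/*
 * Late STP initialization: set up the retry timer and the time sync
 * workqueue and, if STP is configured online, queue the first
 * synchronization.
 */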
static int __init stp_init(void)
{
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return 0;
	timer_setup(&stp_timer, stp_timeout, 0);
	time_init_wq();
	if (!stp_online)
		return 0;
	queue_work(time_sync_wq, &stp_work);
	return 0;
}

arch_initcall(stp_init);

/*
 * STP timing alert. There are three causes:
 * 1) timing status change
 * 2) link availability change
 * 3) time control parameter change
 * In all three cases we are only interested in the clock source state.
 * If an STP clock source is now available, use it.
 */
static void stp_timing_alert(struct stp_irq_parm *intparm)
{
	if (intparm->tsc || intparm->lac || intparm->tcpc)
		queue_work(time_sync_wq, &stp_work);
}

/*
 * STP sync check machine check. This is called when the timing state
 * changes from the synchronized state to the unsynchronized state.
 * After an STP sync check the clock is not in sync. The machine check
 * is broadcast to all CPUs at the same time.
 */
int stp_sync_check(void)
{
	disable_sync_clock(NULL);
	return 1;
}

/*
 * STP island condition machine check. This is called when an attached
 * server attempts to communicate over an STP link and the servers
 * have matching CTN ids and have a valid stratum-1 configuration
 * but the configurations do not match.
 */
int stp_island_check(void)
{
	disable_sync_clock(NULL);
	return 1;
}

void stp_queue_work(void)
{
	queue_work(time_sync_wq, &stp_work);
}

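/*
 * Store the current STP information block via chsc_sstpi() into
 * stp_info and track its validity in clock_sync_flags.
 */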
static int __store_stpinfo(void)
{
	int rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));

	if (rc)
		clear_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
	else
		set_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
	return rc;
}

static int stpinfo_valid(void)
{
	return stp_online && test_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
}

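/*
 * Per-CPU callback run under stop_machine() to perform the actual STP
 * synchronization. The first CPU to enter acts as the master: it waits
 * until all other CPUs have arrived, issues the STP sync operation,
 * applies the resulting clock delta to the global data and publishes
 * the outcome in sync->in_sync. All other CPUs spin until the master
 * is done and then apply the delta to their per-CPU data.
 */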
static int stp_sync_clock(void *data)
{
	struct clock_sync_data *sync = data;
	unsigned long long clock_delta, flags;
	static int first;
	int rc;

	enable_sync_clock();
	if (xchg(&first, 1) == 0) {
		/* Wait until all other cpus entered the sync function. */
		while (atomic_read(&sync->cpus) != 0)
			cpu_relax();
		rc = 0;
		if (stp_info.todoff[0] || stp_info.todoff[1] ||
		    stp_info.todoff[2] || stp_info.todoff[3] ||
		    stp_info.tmd != 2) {
			flags = vdso_update_begin();
			rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
					&clock_delta);
			if (rc == 0) {
				sync->clock_delta = clock_delta;
				clock_sync_global(clock_delta);
				rc = __store_stpinfo();
				if (rc == 0 && stp_info.tmd != 2)
					rc = -EAGAIN;
			}
			vdso_update_end(flags);
		}
		sync->in_sync = rc ? -EAGAIN : 1;
		xchg(&first, 0);
	} else {
		/* Slave */
		atomic_dec(&sync->cpus);
		/* Wait for in_sync to be set. */
		while (READ_ONCE(sync->in_sync) == 0)
			__udelay(1);
	}
	if (sync->in_sync != 1)
		/* Didn't work. Clear per-cpu in sync bit again. */
		disable_sync_clock(NULL);
	/* Apply clock delta to per-CPU fields of this CPU. */
	clock_sync_local(sync->clock_delta);

	return 0;
}

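/*
 * Clear a previously scheduled leap second insertion/deletion by
 * removing the STA_INS and STA_DEL bits from the kernel timekeeping
 * status via do_adjtimex().
 */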
static int stp_clear_leap(void)
{
	struct __kernel_timex txc;
	int ret;

	memset(&txc, 0, sizeof(txc));

	ret = do_adjtimex(&txc);
	if (ret < 0)
		return ret;

	txc.modes = ADJ_STATUS;
	txc.status &= ~(STA_INS|STA_DEL);
	return do_adjtimex(&txc);
}

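/*
 * Evaluate the leap second offset information block (chsc_stzi). If a
 * one second insertion or deletion is due within the next two hours
 * (7200 s), arm the corresponding kernel flag and schedule a timer to
 * clear it again four hours later; if it is further away, re-check in
 * one hour; if it already passed, clear the flags.
 */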
static void stp_check_leap(void)
{
	struct stp_stzi stzi;
	struct stp_lsoib *lsoib = &stzi.lsoib;
	struct __kernel_timex txc;
	int64_t timediff;
	int leapdiff, ret;

	if (!stp_info.lu || !check_sync_clock()) {
		/*
		 * Either a scheduled leap second was removed by the operator,
		 * or STP is out of sync. In both cases, clear the leap second
		 * kernel flags.
		 */
		if (stp_clear_leap() < 0)
			pr_err("failed to clear leap second flags\n");
		return;
	}

	if (chsc_stzi(stp_page, &stzi, sizeof(stzi))) {
		pr_err("stzi failed\n");
		return;
	}

	timediff = tod_to_ns(lsoib->nlsout - get_tod_clock()) / NSEC_PER_SEC;
	leapdiff = lsoib->nlso - lsoib->also;

	if (leapdiff != 1 && leapdiff != -1) {
		pr_err("Cannot schedule %d leap seconds\n", leapdiff);
		return;
	}

	if (timediff < 0) {
		if (stp_clear_leap() < 0)
			pr_err("failed to clear leap second flags\n");
	} else if (timediff < 7200) {
		memset(&txc, 0, sizeof(txc));
		ret = do_adjtimex(&txc);
		if (ret < 0)
			return;

		txc.modes = ADJ_STATUS;
		if (leapdiff > 0)
			txc.status |= STA_INS;
		else
			txc.status |= STA_DEL;
		ret = do_adjtimex(&txc);
		if (ret < 0)
			pr_err("failed to set leap second flags\n");
		/* arm Timer to clear leap second flags */
		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(14400 * MSEC_PER_SEC));
	} else {
		/* The day the leap second is scheduled for hasn't been reached. Retry
		 * in one hour.
		 */
		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(3600 * MSEC_PER_SEC));
	}
}

/*
 * STP work. Check for the STP state and take over the clock
 * synchronization if the STP clock source is usable.
 */
static void stp_work_fn(struct work_struct *work)
{
	struct clock_sync_data stp_sync;
	int rc;

	/* prevent multiple execution. */
	mutex_lock(&stp_mutex);

	if (!stp_online) {
		chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
		del_timer_sync(&stp_timer);
		goto out_unlock;
	}

	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xf0e0, NULL);
	if (rc)
		goto out_unlock;

	rc = __store_stpinfo();
	if (rc || stp_info.c == 0)
		goto out_unlock;

	/* Skip synchronization if the clock is already in sync. */
	if (!check_sync_clock()) {
		memset(&stp_sync, 0, sizeof(stp_sync));
		cpus_read_lock();
		atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
		stop_machine_cpuslocked(stp_sync_clock, &stp_sync, cpu_online_mask);
		cpus_read_unlock();
	}

	if (!check_sync_clock())
		/*
		 * There is a usable clock but the synchronization failed.
		 * Retry after a second.
		 */
		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(MSEC_PER_SEC));
	else if (stp_info.lu)
		stp_check_leap();

out_unlock:
	mutex_unlock(&stp_mutex);
}


/*
 * STP subsys sysfs interface functions
 */
static struct bus_type stp_subsys = {
	.name		= "stp",
	.dev_name	= "stp",
};

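/*
 * Read-only attributes of the "stp" subsys. Assuming stp_subsys is
 * registered with subsys_system_register() elsewhere, the attributes
 * appear under /sys/devices/system/stp/, e.g.
 * /sys/devices/system/stp/ctn_id.
 */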
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) static ssize_t ctn_id_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) ssize_t ret = -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) mutex_lock(&stp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) if (stpinfo_valid())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) ret = sprintf(buf, "%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) *(unsigned long long *) stp_info.ctnid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) mutex_unlock(&stp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) static DEVICE_ATTR_RO(ctn_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) static ssize_t ctn_type_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) ssize_t ret = -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) mutex_lock(&stp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) if (stpinfo_valid())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) ret = sprintf(buf, "%i\n", stp_info.ctn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) mutex_unlock(&stp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) static DEVICE_ATTR_RO(ctn_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) static ssize_t dst_offset_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) ssize_t ret = -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) mutex_lock(&stp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (stpinfo_valid() && (stp_info.vbits & 0x2000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) ret = sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) mutex_unlock(&stp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) static DEVICE_ATTR_RO(dst_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) static ssize_t leap_seconds_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) ssize_t ret = -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) mutex_lock(&stp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (stpinfo_valid() && (stp_info.vbits & 0x8000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) ret = sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) mutex_unlock(&stp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) static DEVICE_ATTR_RO(leap_seconds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
static ssize_t leap_seconds_scheduled_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct stp_stzi stzi;
	ssize_t ret;

	mutex_lock(&stp_mutex);
	if (!stpinfo_valid() || !(stp_info.vbits & 0x8000) || !stp_info.lu) {
		mutex_unlock(&stp_mutex);
		return -ENODATA;
	}

	ret = chsc_stzi(stp_page, &stzi, sizeof(stzi));
	mutex_unlock(&stp_mutex);
	if (ret < 0)
		return ret;

	if (!stzi.lsoib.p)
		return sprintf(buf, "0,0\n");

	return sprintf(buf, "%llu,%d\n",
		       tod_to_ns(stzi.lsoib.nlsout - TOD_UNIX_EPOCH) / NSEC_PER_SEC,
		       stzi.lsoib.nlso - stzi.lsoib.also);
}

static DEVICE_ATTR_RO(leap_seconds_scheduled);

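/*
 * STP stratum of this system, i.e. roughly its distance from the
 * primary reference time source of the Coordinated Timing Network.
 */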
static ssize_t stratum_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(stratum);

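/* Total time offset (stp_info.tto), printed as a signed 32-bit value. */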
static ssize_t time_offset_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x0800))
		ret = sprintf(buf, "%i\n", (int) stp_info.tto);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(time_offset);

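/* Time zone offset (stp_info.tzo), printed as a signed 16-bit value. */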
static ssize_t time_zone_offset_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x4000))
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(time_zone_offset);

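/*
 * Raw timing mode code (stp_info.tmd); the individual values are
 * defined by the STP architecture and are passed through unchanged.
 */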
static ssize_t timing_mode_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", stp_info.tmd);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(timing_mode);

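/*
 * Raw timing state code (stp_info.tst), likewise passed through
 * unchanged from the STP information block.
 */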
static ssize_t timing_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", stp_info.tst);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(timing_state);

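/*
 * The "online" attribute switches STP synchronization on or off.
 * Writing 1 or 0 updates stp_online, sets or clears CLOCK_SYNC_STP and
 * schedules stp_work to (re)configure the facility; any other value is
 * rejected with -EINVAL, and -EOPNOTSUPP is returned when STP is not
 * available on this machine.
 */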
static ssize_t online_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	return sprintf(buf, "%i\n", stp_online);
}

static ssize_t online_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned int value;

	value = simple_strtoul(buf, NULL, 0);
	if (value != 0 && value != 1)
		return -EINVAL;
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return -EOPNOTSUPP;
	mutex_lock(&stp_mutex);
	stp_online = value;
	if (stp_online)
		set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
	else
		clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
	queue_work(time_sync_wq, &stp_work);
	mutex_unlock(&stp_mutex);
	return count;
}

/*
 * The attribute is exposed as stp/online.  It used to be defined by
 * hand because a dev_attr_online for the (since removed) ETR interface
 * already existed in this file; with that conflict gone,
 * DEVICE_ATTR_RW() can be used directly.
 */
static DEVICE_ATTR_RW(online);

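/*
 * NULL-terminated list of all STP sysfs attributes; stp_init_sysfs()
 * creates one file per entry under the stp subsystem device.
 */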
static struct device_attribute *stp_attributes[] = {
	&dev_attr_ctn_id,
	&dev_attr_ctn_type,
	&dev_attr_dst_offset,
	&dev_attr_leap_seconds,
	&dev_attr_online,
	&dev_attr_leap_seconds_scheduled,
	&dev_attr_stratum,
	&dev_attr_time_offset,
	&dev_attr_time_zone_offset,
	&dev_attr_timing_mode,
	&dev_attr_timing_state,
	NULL
};

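/*
 * Register the stp sysfs subsystem and create the attribute files.  On
 * failure the files created so far are removed again and the subsystem
 * is unregistered.  With subsys_system_register() the attributes are
 * expected to show up under /sys/devices/system/stp/ (assuming the
 * subsystem declared earlier in this file is named "stp").
 */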
static int __init stp_init_sysfs(void)
{
	struct device_attribute **attr;
	int rc;

	rc = subsys_system_register(&stp_subsys, NULL);
	if (rc)
		goto out;
	for (attr = stp_attributes; *attr; attr++) {
		rc = device_create_file(stp_subsys.dev_root, *attr);
		if (rc)
			goto out_unreg;
	}
	return 0;
out_unreg:
	for (; attr >= stp_attributes; attr--)
		device_remove_file(stp_subsys.dev_root, *attr);
	bus_unregister(&stp_subsys);
out:
	return rc;
}

device_initcall(stp_init_sysfs);
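
/*
 * Typical use from user space, assuming the default sysfs mount point
 * and the "stp" subsystem name:
 *
 *   cat /sys/devices/system/stp/timing_state
 *   echo 1 > /sys/devices/system/stp/online
 */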