Orange Pi 5 kernel

Deprecated Linux 5.10.110 kernel for the Orange Pi 5/5B/5 Plus boards

kernel/time/vsyscall.c — all lines from commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300):

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 ARM Ltd.
 *
 * Generic implementation of update_vsyscall and update_vsyscall_tz.
 *
 * Based on the x86 specific implementation.
 */

#include <linux/hrtimer.h>
#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include "timekeeping_internal.h"

static inline void update_vdso_data(struct vdso_data *vdata,
				    struct timekeeper *tk)
{
	struct vdso_timestamp *vdso_ts;
	u64 nsec, sec;

	vdata[CS_HRES_COARSE].cycle_last	= tk->tkr_mono.cycle_last;
	vdata[CS_HRES_COARSE].mask		= tk->tkr_mono.mask;
	vdata[CS_HRES_COARSE].mult		= tk->tkr_mono.mult;
	vdata[CS_HRES_COARSE].shift		= tk->tkr_mono.shift;
	vdata[CS_RAW].cycle_last		= tk->tkr_raw.cycle_last;
	vdata[CS_RAW].mask			= tk->tkr_raw.mask;
	vdata[CS_RAW].mult			= tk->tkr_raw.mult;
	vdata[CS_RAW].shift			= tk->tkr_raw.shift;

	/* CLOCK_MONOTONIC */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
	vdso_ts->sec	= tk->xtime_sec + tk->wall_to_monotonic.tv_sec;

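	/*
	 * tkr_mono.xtime_nsec holds nanoseconds shifted left by
	 * tkr_mono.shift, so the wall_to_monotonic offset is scaled up
	 * before adding and the carry into seconds is detected against
	 * NSEC_PER_SEC << shift rather than NSEC_PER_SEC.
	 */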
	nsec = tk->tkr_mono.xtime_nsec;
	nsec += ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
	while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
		nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
		vdso_ts->sec++;
	}
	vdso_ts->nsec	= nsec;

	/* Copy MONOTONIC time for BOOTTIME */
	sec	= vdso_ts->sec;
	/* Add the boot offset */
	sec	+= tk->monotonic_to_boot.tv_sec;
	nsec	+= (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift;

	/* CLOCK_BOOTTIME */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
	vdso_ts->sec	= sec;

	while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
		nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
		vdso_ts->sec++;
	}
	vdso_ts->nsec	= nsec;

	/* CLOCK_MONOTONIC_RAW */
	vdso_ts		= &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
	vdso_ts->sec	= tk->raw_sec;
	vdso_ts->nsec	= tk->tkr_raw.xtime_nsec;

	/* CLOCK_TAI */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
	vdso_ts->sec	= tk->xtime_sec + (s64)tk->tai_offset;
	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec;
}

void update_vsyscall(struct timekeeper *tk)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();
	struct vdso_timestamp *vdso_ts;
	s32 clock_mode;
	u64 nsec;

	/* copy vsyscall data */
	vdso_write_begin(vdata);

	clock_mode = tk->tkr_mono.clock->vdso_clock_mode;
	vdata[CS_HRES_COARSE].clock_mode	= clock_mode;
	vdata[CS_RAW].clock_mode		= clock_mode;

	/* CLOCK_REALTIME also required for time() */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
	vdso_ts->sec	= tk->xtime_sec;
	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec;

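	/*
	 * The two *_COARSE clocks below store plain (already shifted-down)
	 * nanoseconds: their vDSO readers return the stored values as-is,
	 * without sampling the clocksource or applying mult/shift.
	 */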
	/* CLOCK_REALTIME_COARSE */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
	vdso_ts->sec	= tk->xtime_sec;
	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;

	/* CLOCK_MONOTONIC_COARSE */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE];
	vdso_ts->sec	= tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
	nsec		= tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
	nsec		= nsec + tk->wall_to_monotonic.tv_nsec;
	vdso_ts->sec	+= __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);

	/*
	 * Read without the seqlock held by clock_getres().
	 * Note: No need to have a second copy.
	 */
	WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);

	/*
	 * If the current clocksource is not VDSO capable, then spare the
	 * update of the high resolution parts.
	 */
	if (clock_mode != VDSO_CLOCKMODE_NONE)
		update_vdso_data(vdata, tk);

	__arch_update_vsyscall(vdata, tk);

	vdso_write_end(vdata);

	__arch_sync_vdso_data(vdata);
}
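
vdso_write_begin()/vdso_write_end() bump the vDSO sequence counter around the update so that userspace readers never see a half-written snapshot. A simplified reader-side loop against the helpers in include/vdso/helpers.h might look like the sketch below; the function name read_monotonic_snapshot() is illustrative and not part of this tree, and vd is assumed to point at the CS_HRES_COARSE element. Real vDSO readers additionally sample the clocksource and apply mult/shift before the final conversion.

/* Illustrative sketch only: retry while an update is in flight. */
static __always_inline
int read_monotonic_snapshot(const struct vdso_data *vd,
			    struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[CLOCK_MONOTONIC];
	u64 sec, nsec;
	u32 seq;

	do {
		seq  = vdso_read_begin(vd);	/* spins while the seq count is odd */
		sec  = vdso_ts->sec;
		nsec = vdso_ts->nsec;		/* still shifted by vd->shift */
	} while (unlikely(vdso_read_retry(vd, seq)));

	ts->tv_sec  = sec;
	ts->tv_nsec = nsec >> vd->shift;
	return 0;
}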

void update_vsyscall_tz(void)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();

	vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
	vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;

	__arch_sync_vdso_data(vdata);
}

/**
 * vdso_update_begin - Start of a VDSO update section
 *
 * Allows architecture code to safely update the architecture specific VDSO
 * data. Disables interrupts, acquires timekeeper lock to serialize against
 * concurrent updates from timekeeping and invalidates the VDSO data
 * sequence counter to prevent concurrent readers from accessing
 * inconsistent data.
 *
 * Returns: Saved interrupt flags which need to be handed in to
 * vdso_update_end().
 */
unsigned long vdso_update_begin(void)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	vdso_write_begin(vdata);
	return flags;
}

/**
 * vdso_update_end - End of a VDSO update section
 * @flags:	Interrupt flags as returned from vdso_update_begin()
 *
 * Pairs with vdso_update_begin(). Marks vdso data consistent, invokes data
 * synchronization if the architecture requires it, drops timekeeper lock
 * and restores interrupt flags.
 */
void vdso_update_end(unsigned long flags)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();

	vdso_write_end(vdata);
	__arch_sync_vdso_data(vdata);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
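
As the kernel-doc above describes, vdso_update_begin()/vdso_update_end() let architecture code publish arch-specific vDSO data outside the regular timekeeping update. A minimal hypothetical caller is sketched below; arch_refresh_vdso_data() and the field it touches are illustrative, not taken from this tree.

/* Hypothetical arch hook: republish vDSO data atomically w.r.t. readers. */
static void arch_refresh_vdso_data(void)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();
	unsigned long flags;

	flags = vdso_update_begin();	/* IRQs off, timekeeper_lock held,
					 * sequence count marked in-update */

	/* Illustrative only: update whatever arch-specific state lives
	 * alongside the generic vdso_data here. */
	vdata[CS_HRES_COARSE].hrtimer_res = hrtimer_resolution;

	vdso_update_end(flags);		/* publish, sync, unlock, restore IRQs */
}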