Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * tick internal variables and functions used by low/high res code
 */
#include <linux/hrtimer.h>
#include <linux/tick.h>

#include "timekeeping.h"
#include "tick-sched.h"

#ifdef CONFIG_GENERIC_CLOCKEVENTS

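/*
 * Sentinel values for tick_do_timer_cpu (the CPU charged with the
 * jiffies update): NONE means no CPU currently owns the duty, BOOT
 * means it has not yet been handed over during early boot.
 */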
# define TICK_DO_TIMER_NONE	-1
# define TICK_DO_TIMER_BOOT	-2

DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
extern ktime_t tick_next_period;
extern ktime_t tick_period;
extern int tick_do_timer_cpu __read_mostly;

extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
extern void tick_handle_periodic(struct clock_event_device *dev);
extern void tick_check_new_device(struct clock_event_device *dev);
extern void tick_shutdown(unsigned int cpu);
extern void tick_suspend(void);
extern void tick_resume(void);
extern bool tick_check_replacement(struct clock_event_device *curdev,
				   struct clock_event_device *newdev);
extern void tick_install_replacement(struct clock_event_device *dev);
extern int tick_is_oneshot_available(void);
extern struct tick_device *tick_get_device(int cpu);

extern int clockevents_tick_resume(struct clock_event_device *dev);
/* Check if the device is functional or a dummy for broadcast */
static inline int tick_device_is_functional(struct clock_event_device *dev)
{
	return !(dev->features & CLOCK_EVT_FEAT_DUMMY);
}

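/*
 * Accessors for the clockevent device state. The field is named
 * state_use_accessors on purpose: code outside the clockevents core
 * should use these helpers rather than touching it directly.
 */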
static inline enum clock_event_state clockevent_get_state(struct clock_event_device *dev)
{
	return dev->state_use_accessors;
}

static inline void clockevent_set_state(struct clock_event_device *dev,
					enum clock_event_state state)
{
	dev->state_use_accessors = state;
}

extern void clockevents_shutdown(struct clock_event_device *dev);
extern void clockevents_exchange_device(struct clock_event_device *old,
					struct clock_event_device *new);
extern void clockevents_switch_state(struct clock_event_device *dev,
				     enum clock_event_state state);
extern int clockevents_program_event(struct clock_event_device *dev,
				     ktime_t expires, bool force);
extern void clockevents_handle_noop(struct clock_event_device *dev);
extern int __clockevents_update_freq(struct clock_event_device *dev, u32 freq);
extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);

/* Broadcasting support */
# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu);
extern void tick_install_broadcast_device(struct clock_event_device *dev, int cpu);
extern int tick_is_broadcast_device(struct clock_event_device *dev);
extern void tick_suspend_broadcast(void);
extern void tick_resume_broadcast(void);
extern bool tick_resume_check_broadcast(void);
extern void tick_broadcast_init(void);
extern void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast);
extern int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq);
extern struct tick_device *tick_get_broadcast_device(void);
extern struct cpumask *tick_get_broadcast_mask(void);
extern const struct clock_event_device *tick_get_wakeup_device(int cpu);
# else /* !CONFIG_GENERIC_CLOCKEVENTS_BROADCAST: */
static inline void tick_install_broadcast_device(struct clock_event_device *dev, int cpu) { }
static inline int tick_is_broadcast_device(struct clock_event_device *dev) { return 0; }
static inline int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) { return 0; }
static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { }
static inline void tick_suspend_broadcast(void) { }
static inline void tick_resume_broadcast(void) { }
static inline bool tick_resume_check_broadcast(void) { return false; }
static inline void tick_broadcast_init(void) { }
static inline int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq) { return -ENODEV; }

/* Set the periodic handler in non broadcast mode */
static inline void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	dev->event_handler = tick_handle_periodic;
}
# endif /* !CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */

#else /* !GENERIC_CLOCKEVENTS: */
static inline void tick_suspend(void) { }
static inline void tick_resume(void) { }
#endif /* !GENERIC_CLOCKEVENTS */

/* Oneshot related functions */
#ifdef CONFIG_TICK_ONESHOT
extern void tick_setup_oneshot(struct clock_event_device *newdev,
			       void (*handler)(struct clock_event_device *),
			       ktime_t nextevt);
extern int tick_program_event(ktime_t expires, int force);
extern void tick_oneshot_notify(void);
extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *));
extern void tick_resume_oneshot(void);
static inline bool tick_oneshot_possible(void) { return true; }
extern int tick_oneshot_mode_active(void);
extern void tick_clock_notify(void);
extern int tick_check_oneshot_change(int allow_nohz);
extern int tick_init_highres(void);
#else /* !CONFIG_TICK_ONESHOT: */
static inline
void tick_setup_oneshot(struct clock_event_device *newdev,
			void (*handler)(struct clock_event_device *),
			ktime_t nextevt) { BUG(); }
static inline void tick_resume_oneshot(void) { BUG(); }
static inline int tick_program_event(ktime_t expires, int force) { return 0; }
static inline void tick_oneshot_notify(void) { }
static inline bool tick_oneshot_possible(void) { return false; }
static inline int tick_oneshot_mode_active(void) { return 0; }
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
#endif /* !CONFIG_TICK_ONESHOT */

/* Functions related to oneshot broadcasting */
#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
extern void tick_broadcast_switch_to_oneshot(void);
extern int tick_broadcast_oneshot_active(void);
extern void tick_check_oneshot_broadcast_this_cpu(void);
bool tick_broadcast_oneshot_available(void);
extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
#else /* !(BROADCAST && ONESHOT): */
static inline void tick_broadcast_switch_to_oneshot(void) { }
static inline int tick_broadcast_oneshot_active(void) { return 0; }
static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
static inline bool tick_broadcast_oneshot_available(void) { return tick_oneshot_possible(); }
#endif /* !(BROADCAST && ONESHOT) */

#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_HOTPLUG_CPU)
extern void tick_broadcast_offline(unsigned int cpu);
#else
static inline void tick_broadcast_offline(unsigned int cpu) { }
#endif

/* NO_HZ_FULL internal */
#ifdef CONFIG_NO_HZ_FULL
extern void tick_nohz_init(void);
# else
static inline void tick_nohz_init(void) { }
#endif

#ifdef CONFIG_NO_HZ_COMMON
extern unsigned long tick_nohz_active;
extern void timers_update_nohz(void);
# ifdef CONFIG_SMP
extern struct static_key_false timers_migration_enabled;
# endif
#else /* CONFIG_NO_HZ_COMMON */
static inline void timers_update_nohz(void) { }
#define tick_nohz_active (0)
#endif

DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);

extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
void timer_clear_idle(void);

void clock_was_set(void);
void clock_was_set_delayed(void);