/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_ENABLED	(0)

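/*
 * On this architecture the preempt count lives in thread_info rather than
 * in a per-cpu variable; all accessors below operate on
 * current_thread_info()->preempt_count.
 */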
static __always_inline int preempt_count(void)
{
	return READ_ONCE(current_thread_info()->preempt_count);
}

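/* Writable accessor used by the preempt_count set/add/sub helpers below. */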
static __always_inline volatile int *preempt_count_ptr(void)
{
	return &current_thread_info()->preempt_count;
}

static __always_inline void preempt_count_set(int pc)
{
	*preempt_count_ptr() = pc;
}

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

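/*
 * PREEMPT_NEED_RESCHED is not folded into the preempt count on this
 * architecture; the need-resched state lives solely in TIF_NEED_RESCHED,
 * so the helpers below are no-ops / always false.
 */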
static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
	return false;
}

/*
 * The various preempt_count add/sub methods
 */

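/*
 * These only ever operate on the current task's count on the local CPU,
 * so plain, non-atomic updates are sufficient.
 */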
static __always_inline void __preempt_count_add(int val)
{
	*preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
	*preempt_count_ptr() -= val;
}

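/*
 * Returns true when the count drops to zero and a reschedule is pending,
 * i.e. the caller (e.g. preempt_enable()) should call __preempt_schedule().
 */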
static __always_inline bool __preempt_count_dec_and_test(void)
{
	/*
	 * Because load-store architectures cannot do per-cpu atomic
	 * operations, we cannot use PREEMPT_NEED_RESCHED here: it might
	 * get lost. Check TIF_NEED_RESCHED explicitly instead.
	 */
	return !--*preempt_count_ptr() && tif_need_resched();
}

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
}

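/*
 * With CONFIG_PREEMPTION the preempt_enable() family re-enters the
 * scheduler through these entry points; the _notrace variant is for
 * callers that must not be function-traced.
 */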
#ifdef CONFIG_PREEMPTION
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPTION */

#endif /* __ASM_PREEMPT_H */