/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
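/*
 * Illustrative sketch of use case (1) above ('done' is a hypothetical flag
 * shared between process context and an IRQ handler on the same CPU; it is
 * not part of this header):
 *
 *	// process context:			// IRQ handler:
 *	while (!READ_ONCE(done))		WRITE_ONCE(done, 1);
 *		cpu_relax();
 *
 * Each access sits in its own C statement, so the compiler may neither
 * hoist the load out of the loop nor tear either access.
 */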
#ifndef __ASM_GENERIC_RWONCE_H
#define __ASM_GENERIC_RWONCE_H

#ifndef __ASSEMBLY__

#include <linux/compiler_types.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>

/*
 * Yes, this permits 64-bit accesses on 32-bit architectures. These will
 * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
 * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
 * (e.g. a virtual address) and a strong prevailing wind.
 */
#define compiletime_assert_rwonce_type(t)					\
	compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),	\
		"Unsupported access size for {READ,WRITE}_ONCE().")
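/*
 * For example (a sketch, not an exhaustive list): READ_ONCE() of a char,
 * short, int, long or long long passes the assertion above, whereas a
 * 16-byte struct is rejected at compile time.
 */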

/*
 * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
 * atomicity. Note that this may result in tears!
 */
#ifndef __READ_ONCE
#define __READ_ONCE(x)	(*(const volatile __unqual_scalar_typeof(x) *)&(x))
#endif
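/*
 * Note that __unqual_scalar_typeof() strips qualifiers from scalar types,
 * so e.g. __READ_ONCE() of a 'const volatile int' yields a plain 'int'
 * rvalue: the volatile cast forces the single access itself, but the result
 * of the expression does not carry volatile semantics into later uses.
 */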

#define READ_ONCE(x)							\
({									\
	compiletime_assert_rwonce_type(x);				\
	__READ_ONCE(x);							\
})

#define __WRITE_ONCE(x, val)						\
do {									\
	*(volatile typeof(x) *)&(x) = (val);				\
} while (0)

#define WRITE_ONCE(x, val)						\
do {									\
	compiletime_assert_rwonce_type(x);				\
	__WRITE_ONCE(x, val);						\
} while (0)
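/*
 * Illustrative sketch of use case (2) from the header comment ('data' and
 * 'ready' are hypothetical shared variables, not part of this header):
 *
 *	data = compute();	// plain store, ordered by the barrier
 *	smp_wmb();		// order the data before the flag
 *	WRITE_ONCE(ready, 1);	// flag store that must not tear
 *
 * The barrier provides the ordering; WRITE_ONCE() only keeps the compiler
 * from fusing, tearing or otherwise mutilating the flag store itself.
 */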

/*
 * Helper for READ_ONCE_NOCHECK(): a volatile word load performed without
 * KASAN/KCSAN instrumentation (hence __no_sanitize_or_inline).
 */
static __no_sanitize_or_inline
unsigned long __read_once_word_nocheck(const void *addr)
{
	return __READ_ONCE(*(unsigned long *)addr);
}

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
 * word from memory atomically but without telling KASAN/KCSAN. This is
 * usually used by unwinding code when walking the stack of a running process.
 */
#define READ_ONCE_NOCHECK(x)						\
({									\
	compiletime_assert(sizeof(x) == sizeof(unsigned long),		\
		"Unsupported access size for READ_ONCE_NOCHECK().");	\
	(typeof(x))__read_once_word_nocheck(&(x));			\
})
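/*
 * Example (a sketch; 'fp' is a hypothetical pointer into another task's
 * stack, which that task may be rewriting while we unwind it):
 *
 *	unsigned long next_fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
 *
 * The load is still a single word-sized access, but the sanitizers will
 * not report it even though it races with the running task.
 */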

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}
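/*
 * Only the first byte is KASAN-checked above: word-at-a-time string code
 * may deliberately load past the end of a string, provided it stays within
 * a mapped page. A sketch ('name' is a hypothetical NUL-terminated string):
 *
 *	unsigned long c = read_word_at_a_time(name);
 *	// scan 'c' for a zero byte, e.g. with has_zero() from
 *	// <asm/word-at-a-time.h>
 */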

#endif /* __ASSEMBLY__ */
#endif /* __ASM_GENERIC_RWONCE_H */