/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/kasan-checks.h>

#define __nops(n)	".rept " #n "\nnop\n.endr\n"
#define nops(n)		asm volatile(__nops(n))

#define sev()		asm volatile("sev" : : : "memory")
#define wfe()		asm volatile("wfe" : : : "memory")
#define wfi()		asm volatile("wfi" : : : "memory")

#define isb()		asm volatile("isb" : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

#define psb_csync()	asm volatile("hint #17" : : : "memory")
#define __tsb_csync()	asm volatile("hint #18" : : : "memory")
#define csdb()		asm volatile("hint #20" : : : "memory")

#define spec_bar()	asm volatile(ALTERNATIVE("dsb nsh\nisb\n",		\
						 SB_BARRIER_INSN"nop\n",	\
						 ARM64_HAS_SB))

#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync()						\
	do {							\
		extern struct static_key_false gic_pmr_sync;	\
								\
		if (static_branch_unlikely(&gic_pmr_sync))	\
			dsb(sy);				\
	} while (0)
#else
#define pmr_sync()	do {} while (0)
#endif

#define mb()		dsb(sy)
#define rmb()		dsb(ld)
#define wmb()		dsb(st)

#define dma_mb()	dmb(osh)
#define dma_rmb()	dmb(oshld)
#define dma_wmb()	dmb(oshst)
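
/*
 * Illustrative sketch (not part of this header): the dma_*() barriers order
 * CPU accesses to memory shared with a DMA-capable device. For a
 * hypothetical descriptor ring (desc, DESC_HW_OWNED and process() are made
 * up for the example):
 *
 *	desc->addr = buf_dma;
 *	desc->len  = len;
 *	dma_wmb();			// payload visible before ownership flips
 *	desc->flags = DESC_HW_OWNED;
 *
 *	// ...and on the completion path:
 *	if (!(READ_ONCE(desc->flags) & DESC_HW_OWNED)) {
 *		dma_rmb();		// read the payload only after the flag
 *		process(desc);
 *	}
 *
 * MMIO doorbell writes are a separate concern and are ordered by the I/O
 * accessors themselves.
 */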

#define tsb_csync()							\
	do {								\
		/*							\
		 * CPUs affected by Arm Erratum 2054223 or 2067961 need	\
		 * another TSB to ensure the trace is flushed. The	\
		 * barriers don't have to be strictly back to back, as	\
		 * long as the CPU remains in a trace prohibited state.	\
		 */							\
		if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE)) \
			__tsb_csync();					\
		__tsb_csync();						\
	} while (0)
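
/*
 * Illustrative sketch (not part of this header): a trace-unit driver is
 * expected to put the CPU in a trace prohibited state first and only then
 * synchronise the trace buffer, roughly:
 *
 *	static void example_drain_trace(void)	// hypothetical helper
 *	{
 *		// tracing already disabled for the current EL
 *		tsb_csync();	// push any buffered trace data out
 *		dsb(nsh);	// ...and make it observable in memory
 *	}
 *
 * The exact drain sequence is driver-specific; the sketch only shows where
 * tsb_csync() sits relative to the data synchronisation barrier.
 */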

/*
 * Generate a mask for array_index_nospec() that is ~0UL when 0 <= idx < sz
 * and 0 otherwise.
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long idx,
						    unsigned long sz)
{
	unsigned long mask;

	asm volatile(
	"	cmp	%1, %2\n"
	"	sbc	%0, xzr, xzr\n"
	: "=r" (mask)
	: "r" (idx), "Ir" (sz)
	: "cc");

	csdb();
	return mask;
}
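
/*
 * Illustrative sketch (not part of this header): the generic
 * array_index_nospec() in <linux/nospec.h> applies this mask to clamp a
 * possibly mispredicted index before it is used to address memory
 * ("table" and "val" are made up for the example):
 *
 *	if (idx < ARRAY_SIZE(table)) {
 *		idx = array_index_nospec(idx, ARRAY_SIZE(table));
 *		val = table[idx];	// idx is forced to 0 under bad speculation
 *	}
 *
 * Together with the csdb() above, this prevents a mispredicted bounds check
 * from being used to speculatively read out of bounds.
 */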

/*
 * Ensure that reads of the counter are treated the same as memory reads
 * for the purposes of ordering by subsequent memory barriers.
 *
 * This insanity brought to you by speculative system register reads,
 * out-of-order memory accesses, sequence locks and Thomas Gleixner.
 *
 * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
 */
#define arch_counter_enforce_ordering(val) do {				\
	u64 tmp, _val = (val);						\
									\
	asm volatile(							\
	"	eor	%0, %1, %1\n"					\
	"	add	%0, sp, %0\n"					\
	"	ldr	xzr, [%0]"					\
	: "=r" (tmp) : "r" (_val));					\
} while (0)
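
/*
 * Illustrative sketch (not part of this header): a counter accessor is
 * expected to pass the value it just read through the macro above so that
 * later barriers which order loads also order the system register read:
 *
 *	static inline u64 example_read_cntvct(void)	// hypothetical helper
 *	{
 *		u64 cnt;
 *
 *		isb();				// don't read the counter early
 *		asm volatile("mrs %0, cntvct_el0" : "=r" (cnt));
 *		arch_counter_enforce_ordering(cnt);	// don't let later loads pass it
 *		return cnt;
 *	}
 *
 * The eor/add/ldr sequence builds an address dependency from the counter
 * value to a dummy load off the stack, making the mrs behave like a load
 * for ordering purposes.
 */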

#define __smp_mb()	dmb(ish)
#define __smp_rmb()	dmb(ishld)
#define __smp_wmb()	dmb(ishst)

#define __smp_store_release(p, v)					\
do {									\
	typeof(p) __p = (p);						\
	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u =	\
		{ .__val = (__force __unqual_scalar_typeof(*p)) (v) };	\
	compiletime_assert_atomic_type(*p);				\
	kasan_check_write(__p, sizeof(*p));				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("stlrb %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u8 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 2:								\
		asm volatile ("stlrh %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u16 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 4:								\
		asm volatile ("stlr %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u32 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 8:								\
		asm volatile ("stlr %1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u64 *)__u.__c)		\
				: "memory");				\
		break;							\
	}								\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u;	\
	typeof(p) __p = (p);						\
	compiletime_assert_atomic_type(*p);				\
	kasan_check_read(__p, sizeof(*p));				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("ldarb %w0, %1"				\
			: "=r" (*(__u8 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 2:								\
		asm volatile ("ldarh %w0, %1"				\
			: "=r" (*(__u16 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 4:								\
		asm volatile ("ldar %w0, %1"				\
			: "=r" (*(__u32 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 8:								\
		asm volatile ("ldar %0, %1"				\
			: "=r" (*(__u64 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	}								\
	(typeof(*p))__u.__val;						\
})
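
/*
 * Illustrative sketch (not part of this header): the usual pairing of the
 * two helpers above (via the generic smp_store_release()/smp_load_acquire()
 * wrappers) is publishing data behind a flag; "data", "ready" and use()
 * are made up for the example:
 *
 *	// writer
 *	data = compute();
 *	smp_store_release(&ready, 1);	// STLR: data visible before ready
 *
 *	// reader
 *	if (smp_load_acquire(&ready))	// LDAR: later loads see the data
 *		use(data);
 *
 * The acquire/release instructions make a separate __smp_mb()/dmb(ish)
 * unnecessary for this pattern.
 */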

#define smp_cond_load_relaxed(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = READ_ONCE(*__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})

#define smp_cond_load_acquire(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = smp_load_acquire(__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})
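
/*
 * Illustrative sketch (not part of this header): smp_cond_load_acquire()
 * waits for a condition on a memory location, using WFE via
 * __cmpwait_relaxed() rather than a pure busy loop. Waiting for a
 * hypothetical flag set by another CPU looks like:
 *
 *	// returns once *(&flag) != 0, with acquire ordering
 *	u32 seen = smp_cond_load_acquire(&flag, VAL != 0);
 *
 * Inside cond_expr, VAL names the most recently loaded value of *ptr; that
 * convention comes from the generic definition in <asm-generic/barrier.h>.
 */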

#include <asm-generic/barrier.h>

#endif /* __ASSEMBLY__ */

#endif /* __ASM_BARRIER_H */