/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_BARRIER_H
#define _ASM_POWERPC_BARRIER_H

#include <asm/asm-const.h>

#ifndef __ASSEMBLY__
#include <asm/ppc-opcode.h>
#endif

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory). The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another. sync is the only instruction sufficient
 * to do this.
 *
 * For the smp_ barriers, ordering is for cacheable memory operations
 * only. We have to use the sync instruction for smp_mb(), since lwsync
 * doesn't order loads with respect to previous stores. Lwsync can be
 * used for smp_rmb() and smp_wmb().
 *
 * However, on CPUs that don't support lwsync, lwsync actually maps to a
 * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("sync" : : : "memory")
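
/*
 * Illustrative sketch (not part of this header; desc and doorbell are
 * hypothetical names): a driver that fills a descriptor in memory and then
 * kicks the device through MMIO needs the full mb(), since ordering against
 * non-cacheable I/O accesses requires sync.
 *
 *	desc->addr = buf_phys;
 *	desc->len  = len;
 *	mb();			// descriptor stores complete before the kick
 *	writel(1, doorbell);	// MMIO write telling the device to go
 */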

/* The sub-arch has lwsync */
#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
#    define SMPWMB      LWSYNC
#else
#    define SMPWMB      eieio
#endif

/* clang defines this macro for a builtin, which will not work with runtime patching */
#undef __lwsync
#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb()	__lwsync()
#define dma_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
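
/*
 * Illustrative sketch (hypothetical names): dma_rmb()/dma_wmb() order
 * accesses to coherent DMA memory shared with a device. A typical receive
 * path checks the status word the device wrote, then issues dma_rmb()
 * before reading the rest of the descriptor:
 *
 *	if (desc->status & DESC_DONE) {
 *		dma_rmb();		// status read before payload reads
 *		len = desc->len;
 *	}
 */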

#define __smp_lwsync()	__lwsync()

#define __smp_mb()	mb()
#define __smp_rmb()	__lwsync()
#define __smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known. For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
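
/*
 * Illustrative sketch (flag_ptr is a hypothetical name): the trap-never
 * twi plus isync ensures no later instruction issues until the loaded
 * value is available.
 *
 *	val = *flag_ptr;	// load from memory
 *	data_barrier(val);	// later instructions wait until val is known
 */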

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_lwsync();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_lwsync();							\
	___p1;								\
})
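
/*
 * Illustrative sketch (hypothetical variables data and ready): the
 * release/acquire pair gives message-passing ordering between CPUs using
 * only lwsync. A consumer that observes ready == 1 is guaranteed to also
 * observe data == 42.
 *
 * Producer:				Consumer:
 *	data = 42;				if (smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);			BUG_ON(data != 42);
 */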

#ifdef CONFIG_PPC64
#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	__unqual_scalar_typeof(*ptr) VAL;			\
	VAL = READ_ONCE(*__PTR);				\
	if (unlikely(!(cond_expr))) {				\
		spin_begin();					\
		do {						\
			VAL = READ_ONCE(*__PTR);		\
		} while (!(cond_expr));				\
		spin_end();					\
	}							\
	(typeof(*ptr))VAL;					\
})
#endif
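
/*
 * Illustrative sketch (flag is a hypothetical variable): the condition is
 * written in terms of VAL, the most recently loaded value, and the macro
 * lowers SMT thread priority (spin_begin()/spin_end()) while it waits:
 *
 *	v = smp_cond_load_relaxed(&flag, VAL != 0);
 *
 * No acquire ordering is implied; use smp_cond_load_acquire() when later
 * accesses must be ordered after the load.
 */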

#ifdef CONFIG_PPC_BOOK3S_64
#define NOSPEC_BARRIER_SLOT   nop
#elif defined(CONFIG_PPC_FSL_BOOK3E)
#define NOSPEC_BARRIER_SLOT   nop; nop
#endif

#ifdef CONFIG_PPC_BARRIER_NOSPEC
/*
 * Prevent execution of subsequent instructions until preceding branches have
 * been fully resolved and are no longer executing speculatively.
 */
#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT

// This also acts as a compiler barrier due to the memory clobber.
#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")

#else /* !CONFIG_PPC_BARRIER_NOSPEC */
#define barrier_nospec_asm
#define barrier_nospec()
#endif /* CONFIG_PPC_BARRIER_NOSPEC */
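
/*
 * Illustrative sketch (table and idx are hypothetical names): the typical
 * use is after a bounds check whose result gates a dependent load, so the
 * load cannot execute under a mispredicted branch:
 *
 *	if (idx < ARRAY_SIZE(table)) {
 *		barrier_nospec();	// resolve the branch before the load
 *		val = table[idx];
 *	}
 */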

/*
 * pmem_wmb() ensures that all stores whose modifications were written to
 * persistent storage by preceding dcbfps/dcbstps instructions have updated
 * persistent storage before any data access or data transfer caused by
 * subsequent instructions is initiated.
 */
#define pmem_wmb() __asm__ __volatile__(PPC_PHWSYNC ::: "memory")
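
/*
 * Illustrative sketch (pmem_dst and flush_range are hypothetical names):
 * a persistent-memory write path flushes the modified lines to the
 * persistence domain and then issues pmem_wmb() before anything that
 * depends on the data being durable:
 *
 *	memcpy(pmem_dst, src, len);
 *	flush_range(pmem_dst, len);	// dcbfps each cache line (hypothetical helper)
 *	pmem_wmb();			// flushes reach persistence before later accesses
 */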

#include <asm-generic/barrier.h>

#endif /* _ASM_POWERPC_BARRIER_H */