/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but they serve more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/rwonce.h>

#ifndef nop
#define nop() asm volatile ("nop")
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb() barrier()
#endif

#ifndef rmb
#define rmb() mb()
#endif

#ifndef wmb
#define wmb() mb()
#endif

#ifndef dma_rmb
#define dma_rmb() rmb()
#endif

#ifndef dma_wmb
#define dma_wmb() wmb()
#endif
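/*
 * Illustrative (non-normative) sketch of the classic dma_wmb()/dma_rmb()
 * producer/consumer pattern, assuming a hypothetical descriptor layout
 * shared with a DMA-capable device:
 *
 *	struct desc { u32 addr; u32 len; u32 owned_by_hw; };
 *
 *	// Producer: publish a buffer to the device.
 *	desc->addr = buf_dma_addr;
 *	desc->len  = buf_len;
 *	dma_wmb();			// order payload before the ownership flip
 *	desc->owned_by_hw = 1;
 *
 *	// Consumer: only read the payload once ownership has returned.
 *	if (!READ_ONCE(desc->owned_by_hw)) {
 *		dma_rmb();		// order ownership check before payload reads
 *		process(desc->addr, desc->len);
 *	}
 */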

#ifndef __smp_mb
#define __smp_mb() mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb() rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb() wmb()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb() __smp_mb()
#endif

#ifndef smp_rmb
#define smp_rmb() __smp_rmb()
#endif

#ifndef smp_wmb
#define smp_wmb() __smp_wmb()
#endif

#else /* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb() barrier()
#endif

#ifndef smp_rmb
#define smp_rmb() barrier()
#endif

#ifndef smp_wmb
#define smp_wmb() barrier()
#endif

#endif /* CONFIG_SMP */

#ifndef __smp_store_mb
#define __smp_store_mb(var, value) do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic() __smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic() __smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v)				\
do {								\
	compiletime_assert_atomic_type(*p);			\
	__smp_mb();						\
	WRITE_ONCE(*p, v);					\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)					\
({								\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);	\
	compiletime_assert_atomic_type(*p);			\
	__smp_mb();						\
	(typeof(*p))___p1;					\
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value) __smp_store_mb(var, value)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() __smp_mb__before_atomic()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic() __smp_mb__after_atomic()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) __smp_store_release(p, v)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else /* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic() barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)					\
do {								\
	compiletime_assert_atomic_type(*p);			\
	barrier();						\
	WRITE_ONCE(*p, v);					\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)					\
({								\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);	\
	compiletime_assert_atomic_type(*p);			\
	barrier();						\
	(typeof(*p))___p1;					\
})
#endif

#endif /* CONFIG_SMP */
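
/*
 * Illustrative (non-normative) sketch of message passing with
 * smp_store_release()/smp_load_acquire(), assuming hypothetical
 * variables 'data' and 'ready' shared between two CPUs:
 *
 *	// CPU 0: publish the data, then the flag.
 *	data = 42;
 *	smp_store_release(&ready, 1);
 *
 *	// CPU 1: an acquire load of the flag orders the later data read.
 *	if (smp_load_acquire(&ready))
 *		BUG_ON(data != 42);	// guaranteed to observe CPU 0's store
 */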

/*
 * Barriers for virtual machine guests when talking to an SMP host.
 *
 * These always map to the __smp_*() variants: even a !CONFIG_SMP guest
 * runs concurrently with the host, so compiler-only barriers would not
 * be sufficient.
 */
#define virt_mb() __smp_mb()
#define virt_rmb() __smp_rmb()
#define virt_wmb() __smp_wmb()
#define virt_store_mb(var, value) __smp_store_mb(var, value)
#define virt_mb__before_atomic() __smp_mb__before_atomic()
#define virt_mb__after_atomic() __smp_mb__after_atomic()
#define virt_store_release(p, v) __smp_store_release(p, v)
#define virt_load_acquire(p) __smp_load_acquire(p)
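/*
 * Illustrative (non-normative) sketch, assuming a hypothetical ring
 * shared between guest and host:
 *
 *	// Guest: fill the slot, then publish the new producer index.
 *	ring[idx & RING_MASK] = req;
 *	virt_store_release(&shared->prod_idx, idx + 1);
 *
 *	// The host side pairs this with an acquire load of prod_idx.
 */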

/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides LOAD->STORE ordering; the additional RMB
 * provides LOAD->LOAD ordering. Together they provide LOAD->{LOAD,STORE}
 * ordering, i.e. (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep() smp_rmb()
#endif
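/*
 * Illustrative (non-normative) sketch, assuming hypothetical variables
 * 'flag' and 'data' with a writer doing: data = 1; smp_wmb();
 * WRITE_ONCE(flag, 1);
 *
 *	if (READ_ONCE(flag)) {
 *		smp_acquire__after_ctrl_dep();	// upgrade the ctrl-dep to ACQUIRE
 *		r = data;			// ordered after the flag load
 *	}
 */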

/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Due to C lacking lambda expressions we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond_expr.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	__unqual_scalar_typeof(*ptr) VAL;			\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	(typeof(*ptr))VAL;					\
})
#endif
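/*
 * Illustrative (non-normative) sketch, assuming a hypothetical lock word;
 * VAL names the freshly loaded value inside the condition expression:
 *
 *	smp_cond_load_relaxed(&lock->val, VAL == 0);	// spin until released
 */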

/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	__unqual_scalar_typeof(*ptr) _val;			\
	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
	smp_acquire__after_ctrl_dep();				\
	(typeof(*ptr))_val;					\
})
#endif
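/*
 * Illustrative (non-normative) sketch, assuming a hypothetical 'ready'
 * flag set with smp_store_release() by another CPU:
 *
 *	smp_cond_load_acquire(&ready, VAL != 0);
 *	// Reads past this point observe the releasing CPU's earlier stores.
 */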

/*
 * pmem_wmb() ensures that all stores whose modifications were written
 * out to persistent storage by preceding instructions have updated
 * persistent storage before any data access or data transfer caused
 * by subsequent instructions is initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb() wmb()
#endif
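/*
 * Illustrative (non-normative) sketch of committing a record to
 * persistent memory, assuming a hypothetical log layout (the intent
 * here is the writeback + barrier + publish ordering, not a complete
 * pmem protocol):
 *
 *	memcpy_flushcache(log->payload, src, len);	// stores + cache writeback
 *	pmem_wmb();			// payload is persistent beyond here
 *	WRITE_ONCE(log->commit, 1);	// now safe to publish the commit flag
 */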

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */