Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_BARRIER_H
#define _ASM_IA64_BARRIER_H

#include <linux/compiler.h>

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()		ia64_mf()
#define rmb()		mb()
#define wmb()		mb()

#define dma_rmb()	mb()
#define dma_wmb()	mb()
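
/*
 * Illustrative sketch (not part of the original header), using
 * hypothetical shared variables "data" and "flag" and a hypothetical
 * do_something() helper: wmb() on the producer side keeps the payload
 * store ahead of the flag store, and rmb() on the consumer side keeps
 * the flag check ahead of the payload read:
 *
 *	producer:			consumer:
 *		data = 42;			while (!READ_ONCE(flag))
 *		wmb();					cpu_relax();
 *		WRITE_ONCE(flag, 1);		rmb();
 *						do_something(data);
 */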

# define __smp_mb()	mb()

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()

/*
 * IA64 GCC turns volatile stores into st.rel and volatile loads into
 * ld.acq, so no asm trickery is needed here.
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
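
/*
 * Illustrative sketch (not part of the original header): the generic
 * smp_store_release()/smp_load_acquire() wrappers from
 * <asm-generic/barrier.h> resolve to the __smp_* definitions above.
 * With hypothetical shared variables "payload" and "flag" and
 * hypothetical compute()/consume() helpers, the release store
 * publishes the payload before the flag, and the acquire load of the
 * flag orders the later payload read after it:
 *
 *	writer:					reader:
 *		payload = compute();			if (smp_load_acquire(&flag))
 *		smp_store_release(&flag, 1);			consume(payload);
 */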

/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */
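
/*
 * Illustrative sketch (not part of the original header, ia64
 * assembler syntax assumed): ";;" marks an instruction-group stop,
 * so placing one before rsm/ssm closes the current group before the
 * PSR change takes effect, e.g.:
 *
 *	;;		// end the current instruction group
 *	rsm psr.i	// then clear PSR.i, with earlier instructions settled
 */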

#include <asm-generic/barrier.h>

#endif /* _ASM_IA64_BARRIER_H */