Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

x86 <asm/barrier.h> (blame: every line last modified in commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
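
/*
 * Editor's sketch (not part of the original header): a minimal example of
 * wmb()/mb() ordering CPU stores against a device doorbell write.  The
 * ring_desc layout and the doorbell register are hypothetical; assumes
 * <linux/types.h> and <asm/io.h> for u32/u64 and writel().
 */
#if 0
struct ring_desc {
	u64 addr;
	u32 len;
	u32 owner;		/* 1 = owned by the device */
};

static void example_post(struct ring_desc *desc, void __iomem *doorbell,
			 u64 buf, u32 len)
{
	desc->addr = buf;
	desc->len  = len;
	wmb();			/* payload stores visible before the owner flag */
	desc->owner = 1;
	mb();			/* flag store ordered before the MMIO doorbell */
	writel(1, doorbell);
}
#endif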

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 * 	bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (index < size), i.e. ~0UL when index < size and 0 otherwise
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
		unsigned long size)
{
	unsigned long mask;

	/* cmp sets CF iff index < size; sbb %0,%0 turns CF into 0 or ~0UL */
	asm volatile ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
			:"g"(size),"r" (index)
			:"cc");
	return mask;
}
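
/*
 * Editor's sketch (not part of the original header): the mask is meant to
 * clamp a user-controlled index on speculatively executed paths, which is
 * what the generic array_index_nospec() helper in <linux/nospec.h> builds
 * on.  'table' and 'nr_entries' below are hypothetical.
 */
#if 0
static unsigned long example_lookup(const unsigned long *table,
				    unsigned long nr_entries,
				    unsigned long index)
{
	if (index >= nr_entries)
		return 0;
	/* architectural bounds check passed; also clamp the speculative path */
	return table[index & array_index_mask_nospec(index, nr_entries)];
}
#endif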

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec

/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative("", "lfence", X86_FEATURE_LFENCE_RDTSC)
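
/*
 * Editor's sketch (not part of the original header): barrier_nospec()
 * placed between a validity check and the dependent load, so the load
 * cannot be issued on a mispredicted "valid" path.  The helper below is
 * hypothetical; assumes <linux/errno.h> for EINVAL.
 */
#if 0
static int example_read_slot(const int *slots, int nr, int idx, int *out)
{
	if (idx < 0 || idx >= nr)
		return -EINVAL;
	barrier_nospec();	/* no speculation past the bounds check */
	*out = slots[idx];
	return 0;
}
#endif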

#define dma_rmb()	barrier()
#define dma_wmb()	barrier()
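
/*
 * Editor's sketch (not part of the original header): dma_rmb() on the
 * consumer side of a descriptor ring in coherent DMA memory, pairing with
 * the device's write of the owner flag.  Reuses the hypothetical
 * ring_desc from the sketch above.
 */
#if 0
static bool example_reap(volatile struct ring_desc *desc, u32 *len)
{
	if (desc->owner != 0)	/* device still owns the descriptor */
		return false;
	dma_rmb();		/* owner load ordered before payload loads */
	*len = desc->len;
	return true;
}
#endif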

#ifdef CONFIG_X86_32
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
#else
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
#endif
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
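
/*
 * Editor's sketch (not part of the original header): the canonical
 * message-passing pairing through the public smp_store_release() /
 * smp_load_acquire() wrappers that these __smp_* macros back.  On x86
 * both sides compile to plain MOVs plus a compiler barrier, as the
 * definitions above show.  Names below are hypothetical.
 */
#if 0
static int example_data;
static int example_ready;

static void example_producer(int v)
{
	example_data = v;
	smp_store_release(&example_ready, 1);	/* publishes example_data */
}

static int example_consumer(void)
{
	if (smp_load_acquire(&example_ready))	/* pairs with the release */
		return example_data;
	return -1;
}
#endif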

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	do { } while (0)
#define __smp_mb__after_atomic()	do { } while (0)
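
/*
 * Editor's sketch (not part of the original header): portable code still
 * writes the smp_mb__{before,after}_atomic() calls; on x86 they expand to
 * nothing because the locked RMW is itself a full barrier.  Names below
 * are hypothetical.
 */
#if 0
static void example_publish(atomic_t *pending, int *flag)
{
	atomic_inc(pending);
	smp_mb__after_atomic();	/* no-op here; a real barrier elsewhere */
	WRITE_ONCE(*flag, 1);
}
#endif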

#include <asm-generic/barrier.h>

/*
 * Make previous memory operations globally visible before
 * a WRMSR.
 *
 * MFENCE makes writes visible, but only affects load/store
 * instructions.  WRMSR is unfortunately not a load/store
 * instruction and is unaffected by MFENCE.  The LFENCE ensures
 * that the WRMSR is not reordered.
 *
 * Most WRMSRs are fully serializing instructions themselves and
 * do not require this barrier.  This is only required for the
 * IA32_TSC_DEADLINE and X2APIC MSRs.
 */
static inline void weak_wrmsr_fence(void)
{
	asm volatile("mfence; lfence" : : : "memory");
}
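
/*
 * Editor's sketch (not part of the original header): this mirrors how the
 * local APIC clockevent code orders memory before hitting the
 * TSC-deadline MSR.  The surrounding function is a simplified,
 * hypothetical example; assumes <asm/msr.h> for wrmsrl() and
 * MSR_IA32_TSC_DEADLINE.
 */
#if 0
static void example_arm_tsc_deadline(u64 deadline)
{
	weak_wrmsr_fence();	/* prior stores globally visible first */
	wrmsrl(MSR_IA32_TSC_DEADLINE, deadline);
}
#endif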

#endif /* _ASM_X86_BARRIER_H */