Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

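arch/arm64/include/asm/percpu.h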
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 ARM Ltd.
 */
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H

#include <linux/preempt.h>

#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/stack_pointer.h>

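/*
 * The per-CPU offset of the current CPU is stashed in TPIDR_EL1, or in
 * TPIDR_EL2 when the kernel runs at EL2 with VHE; the ALTERNATIVE below
 * is patched at boot based on ARM64_HAS_VIRT_HOST_EXTN.
 */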
static inline void set_my_cpu_offset(unsigned long off)
{
	asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
				 "msr tpidr_el2, %0",
				 ARM64_HAS_VIRT_HOST_EXTN)
			:: "r" (off) : "memory");
}

static inline unsigned long __hyp_my_cpu_offset(void)
{
	/*
	 * Non-VHE hyp code runs with preemption disabled. No need to hazard
	 * the register access against barrier() as in __kern_my_cpu_offset.
	 */
	return read_sysreg(tpidr_el2);
}

static inline unsigned long __kern_my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm(ALTERNATIVE("mrs %0, tpidr_el1",
			"mrs %0, tpidr_el2",
			ARM64_HAS_VIRT_HOST_EXTN)
		: "=r" (off) :
		"Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}

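/*
 * nVHE hyp code always runs at EL2 and so reads TPIDR_EL2 directly;
 * everything else uses the boot-patched kernel accessor above.
 */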
#ifdef __KVM_NVHE_HYPERVISOR__
#define __my_cpu_offset __hyp_my_cpu_offset()
#else
#define __my_cpu_offset __kern_my_cpu_offset()
#endif

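/*
 * Generate __percpu_read_<sz>()/__percpu_write_<sz>() accessors for each
 * operand size in terms of READ_ONCE()/WRITE_ONCE().
 */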
#define PERCPU_RW_OPS(sz)						\
static inline unsigned long __percpu_read_##sz(void *ptr)		\
{									\
	return READ_ONCE(*(u##sz *)ptr);				\
}									\
									\
static inline void __percpu_write_##sz(void *ptr, unsigned long val)	\
{									\
	WRITE_ONCE(*(u##sz *)ptr, (u##sz)val);				\
}

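/*
 * Generate a non-returning per-CPU read-modify-write op. The LL/SC loop
 * is the default; on CPUs with LSE atomics it is patched into a single
 * store-op instruction, with nops padding both sequences to equal length.
 */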
#define __PERCPU_OP_CASE(w, sfx, name, sz, op_llsc, op_lse)		\
static inline void							\
__percpu_##name##_case_##sz(void *ptr, unsigned long val)		\
{									\
	unsigned int loop;						\
	u##sz tmp;							\
									\
	asm volatile (ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"1:	ldxr" #sfx "\t%" #w "[tmp], %[ptr]\n"			\
		#op_llsc "\t%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	stxr" #sfx "\t%w[loop], %" #w "[tmp], %[ptr]\n"		\
	"	cbnz	%w[loop], 1b",					\
	/* LSE atomics */						\
		#op_lse "\t%" #w "[val], %[ptr]\n"			\
		__nops(3))						\
	: [loop] "=&r" (loop), [tmp] "=&r" (tmp),			\
	  [ptr] "+Q"(*(u##sz *)ptr)					\
	: [val] "r" ((u##sz)(val)));					\
}

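/*
 * As above, but the op also returns the new value. In the LSE path the
 * ld<op> instruction returns the old value, so the new value has to be
 * recomputed afterwards with the plain ALU op.
 */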
#define __PERCPU_RET_OP_CASE(w, sfx, name, sz, op_llsc, op_lse)		\
static inline u##sz							\
__percpu_##name##_return_case_##sz(void *ptr, unsigned long val)	\
{									\
	unsigned int loop;						\
	u##sz ret;							\
									\
	asm volatile (ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"1:	ldxr" #sfx "\t%" #w "[ret], %[ptr]\n"			\
		#op_llsc "\t%" #w "[ret], %" #w "[ret], %" #w "[val]\n"	\
	"	stxr" #sfx "\t%w[loop], %" #w "[ret], %[ptr]\n"		\
	"	cbnz	%w[loop], 1b",					\
	/* LSE atomics */						\
		#op_lse "\t%" #w "[val], %" #w "[ret], %[ptr]\n"	\
		#op_llsc "\t%" #w "[ret], %" #w "[ret], %" #w "[val]\n"	\
		__nops(2))						\
	: [loop] "=&r" (loop), [ret] "=&r" (ret),			\
	  [ptr] "+Q"(*(u##sz *)ptr)					\
	: [val] "r" ((u##sz)(val)));					\
									\
	return ret;							\
}

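/*
 * Instantiate the 8/16/32/64-bit cases: 'w' selects W versus X registers
 * and 'sfx' the byte/halfword suffix on the exclusive load/store.
 */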
#define PERCPU_OP(name, op_llsc, op_lse)				\
	__PERCPU_OP_CASE(w, b, name,  8, op_llsc, op_lse)		\
	__PERCPU_OP_CASE(w, h, name, 16, op_llsc, op_lse)		\
	__PERCPU_OP_CASE(w,  , name, 32, op_llsc, op_lse)		\
	__PERCPU_OP_CASE( ,  , name, 64, op_llsc, op_lse)

#define PERCPU_RET_OP(name, op_llsc, op_lse)				\
	__PERCPU_RET_OP_CASE(w, b, name,  8, op_llsc, op_lse)		\
	__PERCPU_RET_OP_CASE(w, h, name, 16, op_llsc, op_lse)		\
	__PERCPU_RET_OP_CASE(w,  , name, 32, op_llsc, op_lse)		\
	__PERCPU_RET_OP_CASE( ,  , name, 64, op_llsc, op_lse)

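/* The concrete helpers backing the this_cpu_*() macros below. */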
PERCPU_RW_OPS(8)
PERCPU_RW_OPS(16)
PERCPU_RW_OPS(32)
PERCPU_RW_OPS(64)
PERCPU_OP(add, add, stadd)
PERCPU_OP(andnot, bic, stclr)
PERCPU_OP(or, orr, stset)
PERCPU_RET_OP(add, add, ldadd)

#undef PERCPU_RW_OPS
#undef __PERCPU_OP_CASE
#undef __PERCPU_RET_OP_CASE
#undef PERCPU_OP
#undef PERCPU_RET_OP

/*
 * It would be nice to avoid the conditional call into the scheduler when
 * re-enabling preemption for preemptible kernels, but doing that in a way
 * which builds inside a module would mean messing directly with the preempt
 * count. If you do this, peterz and tglx will hunt you down.
 */
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	preempt_disable_notrace();					\
	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
					raw_cpu_ptr(&(ptr2)),		\
					o1, o2, n1, n2);		\
	preempt_enable_notrace();					\
	__ret;								\
})

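/*
 * Run an op on this CPU's copy of a per-CPU variable with preemption
 * disabled, so the task cannot migrate mid-operation. The notrace
 * variants are used so that tracing code can itself use per-CPU ops.
 */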
#define _pcp_protect(op, pcp, ...)					\
({									\
	preempt_disable_notrace();					\
	op(raw_cpu_ptr(&(pcp)), __VA_ARGS__);				\
	preempt_enable_notrace();					\
})

#define _pcp_protect_return(op, pcp, args...)				\
({									\
	typeof(pcp) __retval;						\
	preempt_disable_notrace();					\
	__retval = (typeof(pcp))op(raw_cpu_ptr(&(pcp)), ##args);	\
	preempt_enable_notrace();					\
	__retval;							\
})

#define this_cpu_read_1(pcp)		\
	_pcp_protect_return(__percpu_read_8, pcp)
#define this_cpu_read_2(pcp)		\
	_pcp_protect_return(__percpu_read_16, pcp)
#define this_cpu_read_4(pcp)		\
	_pcp_protect_return(__percpu_read_32, pcp)
#define this_cpu_read_8(pcp)		\
	_pcp_protect_return(__percpu_read_64, pcp)

#define this_cpu_write_1(pcp, val)	\
	_pcp_protect(__percpu_write_8, pcp, (unsigned long)val)
#define this_cpu_write_2(pcp, val)	\
	_pcp_protect(__percpu_write_16, pcp, (unsigned long)val)
#define this_cpu_write_4(pcp, val)	\
	_pcp_protect(__percpu_write_32, pcp, (unsigned long)val)
#define this_cpu_write_8(pcp, val)	\
	_pcp_protect(__percpu_write_64, pcp, (unsigned long)val)

#define this_cpu_add_1(pcp, val)	\
	_pcp_protect(__percpu_add_case_8, pcp, val)
#define this_cpu_add_2(pcp, val)	\
	_pcp_protect(__percpu_add_case_16, pcp, val)
#define this_cpu_add_4(pcp, val)	\
	_pcp_protect(__percpu_add_case_32, pcp, val)
#define this_cpu_add_8(pcp, val)	\
	_pcp_protect(__percpu_add_case_64, pcp, val)

#define this_cpu_add_return_1(pcp, val)	\
	_pcp_protect_return(__percpu_add_return_case_8, pcp, val)
#define this_cpu_add_return_2(pcp, val)	\
	_pcp_protect_return(__percpu_add_return_case_16, pcp, val)
#define this_cpu_add_return_4(pcp, val)	\
	_pcp_protect_return(__percpu_add_return_case_32, pcp, val)
#define this_cpu_add_return_8(pcp, val)	\
	_pcp_protect_return(__percpu_add_return_case_64, pcp, val)

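/*
 * this_cpu_and() is built on the andnot op: BIC/STCLR with ~val clears
 * exactly the bits that are clear in val.
 */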
#define this_cpu_and_1(pcp, val)	\
	_pcp_protect(__percpu_andnot_case_8, pcp, ~val)
#define this_cpu_and_2(pcp, val)	\
	_pcp_protect(__percpu_andnot_case_16, pcp, ~val)
#define this_cpu_and_4(pcp, val)	\
	_pcp_protect(__percpu_andnot_case_32, pcp, ~val)
#define this_cpu_and_8(pcp, val)	\
	_pcp_protect(__percpu_andnot_case_64, pcp, ~val)

#define this_cpu_or_1(pcp, val)		\
	_pcp_protect(__percpu_or_case_8, pcp, val)
#define this_cpu_or_2(pcp, val)		\
	_pcp_protect(__percpu_or_case_16, pcp, val)
#define this_cpu_or_4(pcp, val)		\
	_pcp_protect(__percpu_or_case_32, pcp, val)
#define this_cpu_or_8(pcp, val)		\
	_pcp_protect(__percpu_or_case_64, pcp, val)

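/*
 * xchg/cmpxchg reuse the generic relaxed primitives: a this_cpu op needs
 * atomicity on the local CPU but no ordering against other CPUs.
 */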
#define this_cpu_xchg_1(pcp, val)	\
	_pcp_protect_return(xchg_relaxed, pcp, val)
#define this_cpu_xchg_2(pcp, val)	\
	_pcp_protect_return(xchg_relaxed, pcp, val)
#define this_cpu_xchg_4(pcp, val)	\
	_pcp_protect_return(xchg_relaxed, pcp, val)
#define this_cpu_xchg_8(pcp, val)	\
	_pcp_protect_return(xchg_relaxed, pcp, val)

#define this_cpu_cmpxchg_1(pcp, o, n)	\
	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#define this_cpu_cmpxchg_2(pcp, o, n)	\
	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#define this_cpu_cmpxchg_4(pcp, o, n)	\
	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#define this_cpu_cmpxchg_8(pcp, o, n)	\
	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)

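/*
 * The nVHE hyp object carries its own per-CPU bases: defining
 * __per_cpu_offset away suppresses the generic offset array so that
 * per_cpu_offset() resolves to __hyp_per_cpu_offset() instead.
 */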
#ifdef __KVM_NVHE_HYPERVISOR__
extern unsigned long __hyp_per_cpu_offset(unsigned int cpu);
#define __per_cpu_offset
#define per_cpu_offset(cpu)	__hyp_per_cpu_offset((cpu))
#endif

#include <asm-generic/percpu.h>

/* Redefine macros for nVHE hyp under DEBUG_PREEMPT to avoid its dependencies. */
#if defined(__KVM_NVHE_HYPERVISOR__) && defined(CONFIG_DEBUG_PREEMPT)
#undef	this_cpu_ptr
#define	this_cpu_ptr		raw_cpu_ptr
#undef	__this_cpu_read
#define	__this_cpu_read		raw_cpu_read
#undef	__this_cpu_write
#define	__this_cpu_write	raw_cpu_write
#endif

#endif /* __ASM_PERCPU_H */