Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

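The listing below is tools/virtio/ringtest/main.h, the common helper header for the virtio ring benchmarks:
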
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Common macros and functions for ring benchmarking.
 */
#ifndef MAIN_H
#define MAIN_H

#include <assert.h>	/* assert() in the fallback cpu_relax() below */
#include <stdbool.h>
#include <stdlib.h>	/* _Exit() in the fallback wait_cycles() below */

/* Generic benchmark parameter, set from the command line. */
extern int param;

/* When set, simulate the cost of VM exits/entries around notifications. */
extern bool do_exit;

#if defined(__x86_64__) || defined(__i386__)
#include <x86intrin.h>

/* Busy-wait for the given number of TSC cycles. */
static inline void wait_cycles(unsigned long long cycles)
{
	unsigned long long t;

	t = __rdtsc();
	while (__rdtsc() - t < cycles) {}
}

#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500

#elif defined(__s390x__)
/* brctg decrements the operand and branches back until it reaches zero. */
static inline void wait_cycles(unsigned long long cycles)
{
	asm volatile("0: brctg %0,0b" : : "d" (cycles));
}

/* tweak me */
#define VMEXIT_CYCLES 200
#define VMENTRY_CYCLES 200

#else
/* Unsupported architecture: bail out if a timed wait is ever requested. */
static inline void wait_cycles(unsigned long long cycles)
{
	_Exit(5);
}
#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0
#endif

/* Simulate the cost of notifying the host (a VM exit). */
static inline void vmexit(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMEXIT_CYCLES);
}

/* Simulate the cost of an interrupt delivered to the guest (a VM entry). */
static inline void vmentry(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMENTRY_CYCLES);
}

/* implemented by ring */
void alloc_ring(void);
/* guest side */
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void disable_call(void);
bool used_empty(void);
bool enable_call(void);
void kick_available(void);
/* host side */
void disable_kick(void);
bool avail_empty(void);
bool enable_kick(void);
bool use_buf(unsigned *, void **);
void call_used(void);

/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

extern unsigned ring_size;

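/*
 * Rough sketch of how a guest-side benchmark loop drives the API above
 * (hypothetical locals; the real loops live in the ringtest .c files and
 * differ in detail):
 *
 *	unsigned len;
 *	void *buf;
 *
 *	while (completed < bufs) {
 *		if (add_inbuf(0x80000, NULL, (void *)(long)started) == 0) {
 *			++started;
 *			kick_available();	// may cost a vmexit()
 *		}
 *		while (get_buf(&len, &buf))	// reap completed buffers
 *			++completed;
 *		if (do_sleep && used_empty() && enable_call())
 *			wait_for_call();	// sleep until the host calls
 *	}
 */
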
/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")

/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
/* "rep; nop" encodes the x86 PAUSE instruction, a spin-loop hint. */
#define cpu_relax() asm ("rep; nop" ::: "memory")
#elif defined(__s390x__)
#define cpu_relax() barrier()
#else
#define cpu_relax() assert(0)
#endif

extern bool do_relax;

static inline void busy_wait(void)
{
	if (do_relax)
		cpu_relax();
	else
		/* prevent compiler from removing busy loops */
		barrier();
}

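/*
 * Typical polling idiom built on busy_wait() (a sketch; mirrors the poll
 * loops in the benchmark drivers):
 *
 *	while (used_empty())
 *		busy_wait();
 */
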
#if defined(__x86_64__)
/*
 * A locked memory operation is a full barrier on x86; the offset keeps the
 * dummy add clear of the 128-byte red zone below %rsp.
 */
#define smp_mb()     asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
#elif defined(__i386__)
/* i386 has no red zone (and no %rsp); Linux uses the same trick on %esp. */
#define smp_mb()     asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
#else
/*
 * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
 * with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb() __sync_synchronize()
#endif

/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
	barrier(); \
	__atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

#define smp_acquire() do { \
	__atomic_thread_fence(__ATOMIC_ACQUIRE); \
	barrier(); \
} while (0)

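/*
 * Example of the intended release/acquire pairing (a sketch, not code from
 * this benchmark): the producer publishes a slot, the consumer observes it.
 *
 *	// producer			// consumer
 *	slot->data = v;			idx = READ_ONCE(*avail_idx);
 *	smp_release();			smp_acquire();
 *	WRITE_ONCE(*avail_idx, i + 1);	v = slot->data;
 */
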
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
/*
 * Stores are not reordered against other stores on these architectures,
 * so a compiler barrier is enough for smp_wmb().
 */
#define smp_wmb() barrier()
#else
#define smp_wmb() smp_release()
#endif

#ifdef __alpha__
/* Only Alpha reorders dependent loads; everywhere else this is a no-op. */
#define smp_read_barrier_depends() smp_acquire()
#else
#define smp_read_barrier_depends() do {} while (0)
#endif

/* Normally provided by the kernel tools headers; fall back when standalone. */
#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif

/* Read *p exactly once, in a single access where the size allows it. */
static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(unsigned char *)res = *(volatile unsigned char *)p; break;
	case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;
	case 4: *(unsigned int *)res = *(volatile unsigned int *)p; break;
	case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

/* Write *p exactly once, mirroring __read_once_size(). */
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile unsigned char *)p = *(unsigned char *)res; break;
	case 2: *(volatile unsigned short *)p = *(unsigned short *)res; break;
	case 4: *(volatile unsigned int *)p = *(unsigned int *)res; break;
	case 8: *(volatile unsigned long long *)p = *(unsigned long long *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

#define READ_ONCE(x) \
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	__read_once_size(&(x), __u.__c, sizeof(x));			\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (typeof(x)) (val) };		\
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
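
/*
 * Usage sketch (hypothetical ring fields): READ_ONCE/WRITE_ONCE stop the
 * compiler from tearing, fusing or re-reading shared index accesses:
 *
 *	unsigned head = READ_ONCE(ring->head);	// exactly one real load
 *	...
 *	WRITE_ONCE(ring->tail, tail + 1);	// exactly one real store
 */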

#endif /* MAIN_H */