Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>

#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x)    (__builtin_expect(!!(x), 0))
#define likely(x)    (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
#ifndef SIZE_MAX
#define SIZE_MAX        (~(size_t)0)
#endif
#define KMALLOC_MAX_SIZE SIZE_MAX

typedef pthread_spinlock_t  spinlock_t;

typedef int gfp_t;
#define __GFP_ZERO 0x1

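/*
 * Minimal userspace stand-ins for the kernel allocation API, so that
 * ptr_ring.h (included below) compiles unmodified: kmalloc() is backed by
 * memalign() and honours __GFP_ZERO; the other helpers build on top of it.
 */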
static void *kmalloc(unsigned size, gfp_t gfp)
{
	void *p = memalign(64, size);
	if (!p)
		return p;

	if (gfp & __GFP_ZERO)
		memset(p, 0, size);
	return p;
}

static inline void *kzalloc(unsigned size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

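/* Mirrors the kernel's kmalloc_array(): reject n * size overflow before allocating. */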
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return kmalloc(n * size, flags);
}

static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

static void kfree(void *p)
{
	if (p)
		free(p);
}

#define kvmalloc_array kmalloc_array
#define kvfree kfree

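/*
 * Spinlock shim: every kernel lock flavour maps to a plain pthread spinlock,
 * since userspace has no interrupts or bottom halves to disable.
 */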
static void spin_lock_init(spinlock_t *lock)
{
	int r = pthread_spin_init(lock, 0);
	assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
	int ret = pthread_spin_lock(lock);
	assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
	int ret = pthread_spin_unlock(lock);
	assert(!ret);
}

static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}

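/* The code under test: the stock ptr_ring implementation, taken straight from the kernel tree. */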
#include "../../../include/linux/ptr_ring.h"

static unsigned long long headcnt, tailcnt;
static struct ptr_ring array ____cacheline_aligned_in_smp;

/* implemented by ring */
void alloc_ring(void)
{
	int ret = ptr_ring_init(&array, ring_size, 0);
	assert(!ret);
	/* Hacky way to poke at ring internals. Useful for testing though. */
	if (param)
		array.batch = param;
}

/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap)
{
	int ret;

	ret = __ptr_ring_produce(&array, buf);
	if (ret >= 0) {
		ret = 0;
		headcnt++;
	}

	return ret;
}

/*
 * The ptr_ring API provides no way for the producer to find out whether a
 * given buffer was consumed.  Our tests merely require that a successful
 * get_buf implies that add_inbuf succeeded in the past and that a future
 * add_inbuf will succeed, so fake it accordingly.
 */
void *get_buf(unsigned *lenp, void **bufp)
{
	void *datap;

	if (tailcnt == headcnt || __ptr_ring_full(&array))
		datap = NULL;
	else {
		datap = "Buffer\n";
		++tailcnt;
	}

	return datap;
}

bool used_empty()
{
	return (tailcnt == headcnt || __ptr_ring_full(&array));
}

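/*
 * Notification (call/kick) hooks are intentionally unimplemented for this
 * backend; each stub traps via assert(0) if the harness ever reaches it.
 */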
void disable_call()
{
	assert(0);
}

bool enable_call()
{
	assert(0);
}

void kick_available(void)
{
	assert(0);
}

/* host side */
void disable_kick()
{
	assert(0);
}

bool enable_kick()
{
	assert(0);
}

bool avail_empty()
{
	return __ptr_ring_empty(&array);
}

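/* Consume one entry; the pointer converts to bool, so non-NULL means a buffer was used. */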
bool use_buf(unsigned *lenp, void **bufp)
{
	void *ptr;

	ptr = __ptr_ring_consume(&array);

	return ptr;
}

void call_used(void)
{
	assert(0);
}
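
/*
 * A minimal single-threaded smoke test of the callbacks above -- not part of
 * the original file, just a sketch of how the harness is expected to use
 * them.  The function name and the PTR_RING_SMOKE_TEST guard are invented
 * here; the sketch assumes main.c has already set ring_size (>= 2) and param,
 * as it does before starting the real benchmark threads, and that a caller
 * is wired into main.c to invoke it.
 */
#ifdef PTR_RING_SMOKE_TEST
void ptr_ring_smoke_test(void)
{
	unsigned len;
	void *buf = "Buffer\n";

	alloc_ring();

	/* Guest side produces one buffer. */
	assert(!add_inbuf(0x80, buf, NULL));
	assert(!avail_empty());

	/* Host side consumes it. */
	assert(use_buf(&len, &buf));

	/* Guest side now sees one completed ("used") buffer, and no more. */
	assert(!used_empty());
	assert(get_buf(&len, &buf));
	assert(used_empty());
}
#endif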