Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5, 5B, and 5 Plus boards

3 commits · 0 branches · 0 tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * raid6_vx$#.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * $#-way unrolled RAID6 gen/xor functions for s390
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * based on the vector facility
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Copyright IBM Corp. 2016
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * This file is postprocessed using unroll.awk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/raid/pq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <asm/fpu/api.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) asm(".include \"asm/vx-insn.h\"\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #define NSIZE 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
/*
 * Preload the two constants used by the parity loops:
 * v24 = 0x07 in every byte (per-byte shift count consumed by MASK),
 * v25 = 0x1d in every byte (the RAID6 GF(2^8) reduction polynomial,
 * ANDed into the overflow mask before it is XORed into Q).
 */
static inline void LOAD_CONST(void)
{
	asm volatile("VREPIB %v24,7");
	asm volatile("VREPIB %v25,0x1d");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  * The SHLBYTE() operation shifts each of the 16 bytes in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  * vector register y left by 1 bit and stores the result in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  * vector register x.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) static inline void SHLBYTE(int x, int y)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	asm volatile ("VAB %0,%1,%1" : : "i" (x), "i" (y));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38)  * For each of the 16 bytes in the vector register y the MASK()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39)  * operation returns 0xFF if the high bit of the byte is 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40)  * or 0x00 if the high bit is 0. The result is stored in vector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41)  * register x.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) static inline void MASK(int x, int y)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	asm volatile ("VESRAVB	%0,%1,24" : : "i" (x), "i" (y));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) static inline void AND(int x, int y, int z)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	asm volatile ("VN %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) static inline void XOR(int x, int y, int z)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 
/*
 * Load 16 * $# bytes from ptr into the vector registers
 * x .. x + $# - 1 with a single VLM (vector load multiple).
 */
static inline void LOAD_DATA(int x, u8 *ptr)
{
	/* Sized so the "m" constraint covers every byte VLM reads. */
	typedef struct { u8 _[16 * $#]; } addrtype;
	/* Pin the address in GPR 1; referenced as base operand %1 below. */
	register addrtype *__ptr asm("1") = (addrtype *) ptr;

	asm volatile ("VLM %2,%3,0,%1"
		      : : "m" (*__ptr), "a" (__ptr), "i" (x),
			  "i" (x + $# - 1));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 
/*
 * Store the vector registers x .. x + $# - 1 to the 16 * $# bytes
 * at ptr with a single VSTM (vector store multiple).
 */
static inline void STORE_DATA(int x, u8 *ptr)
{
	/* Sized so the "=m" constraint covers every byte VSTM writes. */
	typedef struct { u8 _[16 * $#]; } addrtype;
	/* Address pinned in GPR 1 to match the hardcoded base reg below. */
	register addrtype *__ptr asm("1") = (addrtype *) ptr;

	/* Base register "1" is written literally; it must stay in sync
	 * with the asm("1") binding of __ptr above. */
	asm volatile ("VSTM %2,%3,0,1"
		      : "=m" (*__ptr) : "a" (__ptr), "i" (x),
			"i" (x + $# - 1));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) static inline void COPY_VEC(int x, int y)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	asm volatile ("VLR %0,%1" : : "i" (x), "i" (y));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
/*
 * Generate both RAID6 parities for a full stripe.
 *
 * ptrs[0 .. disks-3] point to the data blocks; ptrs[disks-2] receives
 * the XOR parity P and ptrs[disks-1] the Reed-Solomon syndrome Q.
 * Per unrolled 16-byte slice the running P lives in the vector
 * registers starting at 0, the running Q in those starting at 8,
 * registers from 16 up are scratch, and v24/v25 hold the constants
 * set up by LOAD_CONST().
 */
static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	struct kernel_fpu vxstate;
	u8 **dptr, *p, *q;
	int d, z, z0;

	/* Save the caller's vector registers and enable vector use. */
	kernel_fpu_begin(&vxstate, KERNEL_VXR);
	LOAD_CONST();

	dptr = (u8 **) ptrs;
	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0 + 1];	/* XOR parity */
	q = dptr[z0 + 2];	/* RS syndrome */

	for (d = 0; d < bytes; d += $#*NSIZE) {
		/* Seed with the highest data disk: P = Q = data[z0]. */
		LOAD_DATA(0,&dptr[z0][d]);
		COPY_VEC(8+$$,0+$$);
		for (z = z0 - 1; z >= 0; z--) {
			/*
			 * Multiply Q by 2 in GF(2^8): build 0xff masks
			 * for the bytes whose top bit is set, shift
			 * every byte left one bit, then XOR in the
			 * masked polynomial 0x1d from v25.
			 */
			MASK(16+$$,8+$$);
			AND(16+$$,16+$$,25);
			SHLBYTE(8+$$,8+$$);
			XOR(8+$$,8+$$,16+$$);
			/* Fold the next lower disk into both P and Q. */
			LOAD_DATA(16,&dptr[z][d]);
			XOR(0+$$,0+$$,16+$$);
			XOR(8+$$,8+$$,16+$$);
		}
		STORE_DATA(0,&p[d]);
		STORE_DATA(8,&q[d]);
	}
	/* Restore the caller's vector registers. */
	kernel_fpu_end(&vxstate, KERNEL_VXR);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
/*
 * Update P/Q for a partial stripe write that modified only the data
 * disks start .. stop (inclusive).  The deltas are accumulated in
 * vector registers and XORed into the P and Q blocks already on
 * disk, so disks above 'stop' never have to be read.
 */
static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
					size_t bytes, void **ptrs)
{
	struct kernel_fpu vxstate;
	u8 **dptr, *p, *q;
	int d, z, z0;

	dptr = (u8 **) ptrs;
	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks - 2];	/* XOR parity */
	q = dptr[disks - 1];	/* RS syndrome */

	kernel_fpu_begin(&vxstate, KERNEL_VXR);
	LOAD_CONST();

	for (d = 0; d < bytes; d += $#*NSIZE) {
		/* P/Q data pages */
		/* Seed the accumulators with the highest written disk. */
		LOAD_DATA(0,&dptr[z0][d]);
		COPY_VEC(8+$$,0+$$);
		for (z = z0 - 1; z >= start; z--) {
			/* Q-accumulator * 2 in GF(2^8), then fold disk z. */
			MASK(16+$$,8+$$);
			AND(16+$$,16+$$,25);
			SHLBYTE(8+$$,8+$$);
			XOR(8+$$,8+$$,16+$$);
			LOAD_DATA(16,&dptr[z][d]);
			XOR(0+$$,0+$$,16+$$);
			XOR(8+$$,8+$$,16+$$);
		}
		/* P/Q left side optimization */
		for (z = start - 1; z >= 0; z--) {
			/*
			 * Disks below 'start' are unchanged, so only
			 * the repeated multiply-by-2 of the Q delta
			 * remains; nothing new enters the P delta.
			 */
			MASK(16+$$,8+$$);
			AND(16+$$,16+$$,25);
			SHLBYTE(8+$$,8+$$);
			XOR(8+$$,8+$$,16+$$);
		}
		/* XOR the accumulated deltas into the on-disk P and Q. */
		LOAD_DATA(16,&p[d]);
		XOR(16+$$,16+$$,0+$$);
		STORE_DATA(16,&p[d]);
		LOAD_DATA(16,&q[d]);
		XOR(16+$$,16+$$,8+$$);
		STORE_DATA(16,&q[d]);
	}
	kernel_fpu_end(&vxstate, KERNEL_VXR);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 
/*
 * This implementation is usable only when the machine provides the
 * vector facility (see the file header); the raid6 core calls this
 * before selecting the algorithm.
 */
static int raid6_s390vx$#_valid(void)
{
	return MACHINE_HAS_VX;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 
/*
 * Algorithm descriptor registered with the generic raid6 code.
 * Positional members: gen_syndrome, xor_syndrome, valid, name,
 * and a trailing integer flag.
 * NOTE(review): member names/meaning come from struct raid6_calls
 * in <linux/raid/pq.h> — confirm the last member's semantics there.
 */
const struct raid6_calls raid6_s390vx$# = {
	raid6_s390vx$#_gen_syndrome,
	raid6_s390vx$#_xor_syndrome,
	raid6_s390vx$#_valid,
	"vx128x$#",
	1
};