^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright 2017, Matt Brown, IBM Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * This program is free software; you can redistribute it and/or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * modify it under the terms of the GNU General Public License
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * as published by the Free Software Foundation; either version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * 2 of the License, or (at your option) any later version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * vpermxor$#.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * Based on H. Peter Anvin's paper - The mathematics of RAID-6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * $#-way unrolled portable integer math RAID-6 instruction set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * This file is postprocessed using unroll.awk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * vpermxor$#.c makes use of the vpermxor instruction to optimise the RAID6 Q
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * syndrome calculations.
 * This can be run on systems which have both Altivec and the vpermxor
 * instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * This instruction was introduced in POWER8 - ISA v2.07.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/raid/pq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <altivec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #ifdef __KERNEL__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <asm/cputable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <asm/ppc-opcode.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <asm/switch_to.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
/* Native working unit: one full Altivec vector (16 bytes) per lane. */
typedef vector unsigned char unative_t;
#define NSIZE sizeof(unative_t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
/*
 * Nibble lookup tables consumed by the vpermxor instruction: for each byte
 * of the source vector it XORs one entry selected by the byte's high nibble
 * (from gf_high) with one selected by the low nibble (from gf_low).
 *
 * Together the tables encode multiplication by 2 in GF(2^8) modulo the
 * RAID6 generator polynomial 0x11d: gf_low holds the contribution of the
 * low nibble n (2*n, never needs reduction), gf_high that of the high
 * nibble h (2*(h<<4) mod 0x11d).  The entries are stored in descending
 * nibble order because of how vpermxor indexes its table operands —
 * NOTE(review): indexing order per ISA v2.07; verify before reordering.
 */
static const vector unsigned char gf_low = {0x1e, 0x1c, 0x1a, 0x18, 0x16, 0x14,
					    0x12, 0x10, 0x0e, 0x0c, 0x0a, 0x08,
					    0x06, 0x04, 0x02,0x00};
static const vector unsigned char gf_high = {0xfd, 0xdd, 0xbd, 0x9d, 0x7d, 0x5d,
					     0x3d, 0x1d, 0xe0, 0xc0, 0xa0, 0x80,
					     0x60, 0x40, 0x20, 0x00};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
/*
 * Compute the RAID6 P (XOR parity) and Q (Reed-Solomon) syndromes over all
 * data disks, $#-way unrolled ($$ expands to each unroll lane's suffix via
 * unroll.awk).
 *
 * @disks: total number of disks, including the two parity disks
 * @bytes: bytes per disk to process (assumed a multiple of NSIZE*$#)
 * @ptrs:  disks pointers; data disks first, then P at [disks-2], Q at [disks-1]
 *
 * Marked noinline so the Altivec state handling in the caller brackets a
 * single, self-contained vector routine.
 */
static void noinline raid6_vpermxor$#_gen_syndrome_real(int disks, size_t bytes,
							void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;
	unative_t wp$$, wq$$, wd$$;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	for (d = 0; d < bytes; d += NSIZE*$#) {
		/* Seed both syndromes with the highest data disk's block */
		wp$$ = wq$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];

		/*
		 * Walk the remaining disks downward, accumulating Q by
		 * Horner's scheme: q = ((d_z0 * 2 + d_z0-1) * 2 + ...).
		 */
		for (z = z0-1; z>=0; z--) {
			wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
			/* P syndrome */
			wp$$ = vec_xor(wp$$, wd$$);

			/* Q syndrome */
			/* wq = wq * 2 in GF(2^8), via the nibble tables */
			asm(VPERMXOR(%0,%1,%2,%3):"=v"(wq$$):"v"(gf_high), "v"(gf_low), "v"(wq$$));
			wq$$ = vec_xor(wq$$, wd$$);
		}
		*(unative_t *)&p[d+NSIZE*$$] = wp$$;
		*(unative_t *)&q[d+NSIZE*$$] = wq$$;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
/*
 * Public entry point: bracket the real computation with the kernel
 * bookkeeping required to touch Altivec registers.  Preemption must stay
 * disabled for the whole enable/use/disable window so the vector state is
 * not lost to a context switch.
 */
static void raid6_vpermxor$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	preempt_disable();
	enable_kernel_altivec();

	raid6_vpermxor$#_gen_syndrome_real(disks, bytes, ptrs);

	disable_kernel_altivec();
	preempt_enable();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) int raid6_have_altivec_vpermxor(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #if $# == 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) int raid6_have_altivec_vpermxor(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) /* Check if arch has both altivec and the vpermxor instructions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) # ifdef __KERNEL__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) return (cpu_has_feature(CPU_FTR_ALTIVEC_COMP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) cpu_has_feature(CPU_FTR_ARCH_207S));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) # else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) const struct raid6_calls raid6_vpermxor$# = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) raid6_vpermxor$#_gen_syndrome,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) raid6_have_altivec_vpermxor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) "vpermxor$#",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #endif