Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) ########################################################################
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) # Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) # Copyright (c) 2013, Intel Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) # Authors:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #     Erdinc Ozturk <erdinc.ozturk@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #     Vinodh Gopal <vinodh.gopal@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #     James Guilford <james.guilford@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #     Tim Chen <tim.c.chen@linux.intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) # This software is available to you under a choice of one of two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) # licenses.  You may choose to be licensed under the terms of the GNU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) # General Public License (GPL) Version 2, available from the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) # COPYING in the main directory of this source tree, or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) # OpenIB.org BSD license below:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) # Redistribution and use in source and binary forms, with or without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) # modification, are permitted provided that the following conditions are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) # met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) # * Redistributions of source code must retain the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #   notice, this list of conditions and the following disclaimer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) # * Redistributions in binary form must reproduce the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #   notice, this list of conditions and the following disclaimer in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #   documentation and/or other materials provided with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #   distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) # * Neither the name of the Intel Corporation nor the names of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #   contributors may be used to endorse or promote products derived from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #   this software without specific prior written permission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) # THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) #       Reference paper titled "Fast CRC Computation for Generic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) #	Polynomials Using PCLMULQDQ Instruction"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) #       URL: http://www.intel.com/content/dam/www/public/us/en/documents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) #  /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) #include <linux/linkage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) .text
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
# SysV AMD64 argument registers for
# u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) #define		init_crc	%edi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) #define		buf		%rsi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) #define		len		%rdx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 
# xmm registers kept live across the whole function: the current pair of fold
# multipliers, and the shuffle mask that byte-reverses a 128-bit value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) #define		FOLD_CONSTS	%xmm10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) #define		BSWAP_MASK	%xmm11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) # Fold reg1, reg2 into the next 32 data bytes, storing the result back into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) # reg1, reg2.
#
# For each register: reg' = clmul(lo64(reg), lo64(FOLD_CONSTS))
#                         ^ clmul(hi64(reg), hi64(FOLD_CONSTS))
#                         ^ next 16 data bytes (byte-reflected).
# See the reference paper cited in the file header for the derivation.
# Clobbers %xmm8, %xmm9, %xmm12, %xmm13.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) .macro	fold_32_bytes	offset, reg1, reg2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	movdqu	\offset(buf), %xmm9	# next 16 data bytes (unaligned load)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	movdqu	\offset+16(buf), %xmm12	# the 16 bytes after them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	pshufb	BSWAP_MASK, %xmm9	# reflect bytes to polynomial bit order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	pshufb	BSWAP_MASK, %xmm12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	movdqa	\reg1, %xmm8		# copy: both halves of reg are multiplied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	movdqa	\reg2, %xmm13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	pclmulqdq	$0x00, FOLD_CONSTS, \reg1	# lo64(reg1) x lo64(K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	pclmulqdq	$0x11, FOLD_CONSTS, %xmm8	# hi64(reg1) x hi64(K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	pclmulqdq	$0x00, FOLD_CONSTS, \reg2	# lo64(reg2) x lo64(K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	pclmulqdq	$0x11, FOLD_CONSTS, %xmm13	# hi64(reg2) x hi64(K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	pxor	%xmm9 , \reg1		# xor in the new data bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	xorps	%xmm8 , \reg1		# combine the two carry-less products
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	pxor	%xmm12, \reg2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	xorps	%xmm13, \reg2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) # Fold src_reg into dst_reg.
#
# dst' = clmul(lo64(src), lo64(FOLD_CONSTS))
#      ^ clmul(hi64(src), hi64(FOLD_CONSTS))
#      ^ dst.
# Clobbers %xmm8 and \src_reg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) .macro	fold_16_bytes	src_reg, dst_reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	movdqa	\src_reg, %xmm8		# copy: both halves of src are multiplied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	pclmulqdq	$0x11, FOLD_CONSTS, \src_reg	# hi64(src) x hi64(K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	pclmulqdq	$0x00, FOLD_CONSTS, %xmm8	# lo64(src) x lo64(K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	pxor	%xmm8, \dst_reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	xorps	\src_reg, \dst_reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) # u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) # Assumes len >= 16.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) #
# ABI:   SysV AMD64 (leaf function, no stack usage).
# In:    %edi = init_crc, %rsi = buf, %rdx = len (see the #defines above).
# Out:   %eax = CRC-T10DIF of buf[0..len) seeded with init_crc (low 16 bits).
# Clobbers: %rax, buf, len, %xmm0-%xmm13, flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) .align 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) SYM_FUNC_START(crc_t10dif_pcl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	movdqa	.Lbswap_mask(%rip), BSWAP_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	# For sizes less than 256 bytes, we can't fold 128 bytes at a time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	cmp	$256, len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	jl	.Lless_than_256_bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	# Load the first 128 data bytes.  Byte swapping is necessary to make the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	# bit order match the polynomial coefficient order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	movdqu	16*0(buf), %xmm0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	movdqu	16*1(buf), %xmm1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	movdqu	16*2(buf), %xmm2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	movdqu	16*3(buf), %xmm3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	movdqu	16*4(buf), %xmm4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	movdqu	16*5(buf), %xmm5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	movdqu	16*6(buf), %xmm6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	movdqu	16*7(buf), %xmm7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	add	$128, buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	pshufb	BSWAP_MASK, %xmm0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	pshufb	BSWAP_MASK, %xmm1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	pshufb	BSWAP_MASK, %xmm2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	pshufb	BSWAP_MASK, %xmm3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	pshufb	BSWAP_MASK, %xmm4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	pshufb	BSWAP_MASK, %xmm5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	pshufb	BSWAP_MASK, %xmm6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	pshufb	BSWAP_MASK, %xmm7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	# XOR the first 16 data *bits* with the initial CRC value.
	# After the byte reflection above, the first data bytes sit in the high
	# end of xmm0, so the CRC is inserted as word 7 (the top 16 bits).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	pxor	%xmm8, %xmm8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	pinsrw	$7, init_crc, %xmm8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	pxor	%xmm8, %xmm0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	movdqa	.Lfold_across_128_bytes_consts(%rip), FOLD_CONSTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	# Subtract 128 for the 128 data bytes just consumed.  Subtract another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	# 128 to simplify the termination condition of the following loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	sub	$256, len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	# While >= 128 data bytes remain (not counting xmm0-7), fold the 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	# bytes xmm0-7 into them, storing the result back into xmm0-7.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) .Lfold_128_bytes_loop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	fold_32_bytes	0, %xmm0, %xmm1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	fold_32_bytes	32, %xmm2, %xmm3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	fold_32_bytes	64, %xmm4, %xmm5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	fold_32_bytes	96, %xmm6, %xmm7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	add	$128, buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	sub	$128, len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	jge	.Lfold_128_bytes_loop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	# Now fold the 112 bytes in xmm0-xmm6 into the 16 bytes in xmm7.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	# Fold across 64 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	movdqa	.Lfold_across_64_bytes_consts(%rip), FOLD_CONSTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	fold_16_bytes	%xmm0, %xmm4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	fold_16_bytes	%xmm1, %xmm5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	fold_16_bytes	%xmm2, %xmm6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	fold_16_bytes	%xmm3, %xmm7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	# Fold across 32 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	movdqa	.Lfold_across_32_bytes_consts(%rip), FOLD_CONSTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	fold_16_bytes	%xmm4, %xmm6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	fold_16_bytes	%xmm5, %xmm7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	# Fold across 16 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	movdqa	.Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	fold_16_bytes	%xmm6, %xmm7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	# Add 128 to get the correct number of data bytes remaining in 0...127
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	# (not counting xmm7), following the previous extra subtraction by 128.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	# Then subtract 16 to simplify the termination condition of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	# following loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	add	$128-16, len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	# While >= 16 data bytes remain (not counting xmm7), fold the 16 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	# xmm7 into them, storing the result back into xmm7.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	jl	.Lfold_16_bytes_loop_done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) .Lfold_16_bytes_loop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	movdqa	%xmm7, %xmm8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	pclmulqdq	$0x11, FOLD_CONSTS, %xmm7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	pclmulqdq	$0x00, FOLD_CONSTS, %xmm8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	pxor	%xmm8, %xmm7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	movdqu	(buf), %xmm0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	pshufb	BSWAP_MASK, %xmm0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	pxor	%xmm0 , %xmm7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	add	$16, buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	sub	$16, len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 	jge	.Lfold_16_bytes_loop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) .Lfold_16_bytes_loop_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 	# Add 16 to get the correct number of data bytes remaining in 0...15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	# (not counting xmm7), following the previous extra subtraction by 16.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 	add	$16, len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	je	.Lreduce_final_16_bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) .Lhandle_partial_segment:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 	# Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 	# bytes are in xmm7 and the rest are the remaining data in 'buf'.  To do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	# this without needing a fold constant for each possible 'len', redivide
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	# the bytes into a first chunk of 'len' bytes and a second chunk of 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 	# bytes, then fold the first chunk into the second.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 	movdqa	%xmm7, %xmm2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	# xmm1 = last 16 original data bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	movdqu	-16(buf, len), %xmm1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	pshufb	BSWAP_MASK, %xmm1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	# xmm2 = high order part of second chunk: xmm7 left-shifted by 'len' bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	lea	.Lbyteshift_table+16(%rip), %rax
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 	sub	len, %rax
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	movdqu	(%rax), %xmm0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	pshufb	%xmm0, %xmm2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	# xmm7 = first chunk: xmm7 right-shifted by '16-len' bytes.
	# XOR with .Lmask1 sets the high bit of every shuffle index, turning the
	# left-shift index vector into the complementary right-shift one
	# (pshufb writes zero for indices with the high bit set).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	pxor	.Lmask1(%rip), %xmm0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	pshufb	%xmm0, %xmm7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	# xmm1 = second chunk: 'len' bytes from xmm1 (low-order bytes),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	# then '16-len' bytes from xmm2 (high-order bytes).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	pblendvb	%xmm2, %xmm1	#xmm0 is implicit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	# Fold the first chunk into the second chunk, storing the result in xmm7.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 	movdqa	%xmm7, %xmm8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	pclmulqdq	$0x11, FOLD_CONSTS, %xmm7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	pclmulqdq	$0x00, FOLD_CONSTS, %xmm8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	pxor	%xmm8, %xmm7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 	pxor	%xmm1, %xmm7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) .Lreduce_final_16_bytes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	# Reduce the 128-bit value M(x), stored in xmm7, to the final 16-bit CRC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 	# Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	movdqa	.Lfinal_fold_consts(%rip), FOLD_CONSTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	# Fold the high 64 bits into the low 64 bits, while also multiplying by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	# x^64.  This produces a 128-bit value congruent to x^64 * M(x) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	# whose low 48 bits are 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	movdqa	%xmm7, %xmm0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	pclmulqdq	$0x11, FOLD_CONSTS, %xmm7 # high bits * x^48 * (x^80 mod G(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	pslldq	$8, %xmm0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 	pxor	%xmm0, %xmm7			  # + low bits * x^64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	# Fold the high 32 bits into the low 96 bits.  This produces a 96-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	# value congruent to x^64 * M(x) and whose low 48 bits are 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	movdqa	%xmm7, %xmm0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	pand	.Lmask2(%rip), %xmm0		  # zero high 32 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	psrldq	$12, %xmm7			  # extract high 32 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	pclmulqdq	$0x00, FOLD_CONSTS, %xmm7 # high 32 bits * x^48 * (x^48 mod G(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 	pxor	%xmm0, %xmm7			  # + low bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 	# Load G(x) and floor(x^48 / G(x)).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	movdqa	.Lbarrett_reduction_consts(%rip), FOLD_CONSTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	# Use Barrett reduction to compute the final CRC value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 	movdqa	%xmm7, %xmm0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	pclmulqdq	$0x11, FOLD_CONSTS, %xmm7 # high 32 bits * floor(x^48 / G(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	psrlq	$32, %xmm7			  # /= x^32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 	pclmulqdq	$0x00, FOLD_CONSTS, %xmm7 # *= G(x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	psrlq	$48, %xmm0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 	pxor	%xmm7, %xmm0		     # + low 16 nonzero bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 	# Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of xmm0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 
	# pextrw zero-extends the 16-bit CRC into %eax (the u16 return value).
	# NOTE(review): plain 'ret' matches this 5.10.110 tree; later stable
	# kernels use the RET macro here (return-thunk/SLS mitigations) --
	# confirm against this tree's <linux/linkage.h> before changing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 	pextrw	$0, %xmm0, %eax
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 	ret
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) .align 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) .Lless_than_256_bytes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	# Checksumming a buffer of length 16...255 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	# Load the first 16 data bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	movdqu	(buf), %xmm7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 	pshufb	BSWAP_MASK, %xmm7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	add	$16, buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 	# XOR the first 16 data *bits* with the initial CRC value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	pxor	%xmm0, %xmm0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 	pinsrw	$7, init_crc, %xmm0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	pxor	%xmm0, %xmm7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 	movdqa	.Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 	cmp	$16, len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 	je	.Lreduce_final_16_bytes		# len == 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 	sub	$32, len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 	jge	.Lfold_16_bytes_loop		# 32 <= len <= 255
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	add	$16, len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 	jmp	.Lhandle_partial_segment	# 17 <= len <= 31
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) SYM_FUNC_END(crc_t10dif_pcl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) .section	.rodata, "a", @progbits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) .align 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) # Fold constants precomputed from the polynomial 0x18bb7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) # G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
#
# Each labelled pair is loaded as a single 16-byte FOLD_CONSTS value: the low
# quadword multiplies the low 64 bits of a register (pclmulqdq $0x00) and the
# high quadword multiplies the high 64 bits (pclmulqdq $0x11).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) .Lfold_across_128_bytes_consts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	.quad		0x0000000000006123	# x^(8*128)	mod G(x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 	.quad		0x0000000000002295	# x^(8*128+64)	mod G(x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) .Lfold_across_64_bytes_consts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 	.quad		0x0000000000001069	# x^(4*128)	mod G(x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 	.quad		0x000000000000dd31	# x^(4*128+64)	mod G(x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) .Lfold_across_32_bytes_consts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 	.quad		0x000000000000857d	# x^(2*128)	mod G(x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	.quad		0x0000000000007acc	# x^(2*128+64)	mod G(x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) .Lfold_across_16_bytes_consts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	.quad		0x000000000000a010	# x^(1*128)	mod G(x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 	.quad		0x0000000000001faa	# x^(1*128+64)	mod G(x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) .Lfinal_fold_consts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	.quad		0x1368000000000000	# x^48 * (x^48 mod G(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 	.quad		0x2d56000000000000	# x^48 * (x^80 mod G(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) .Lbarrett_reduction_consts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	.quad		0x0000000000018bb7	# G(x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 	.quad		0x00000001f65a57f8	# floor(x^48 / G(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) .section	.rodata.cst16.mask1, "aM", @progbits, 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) .align 16
# All bytes 0x80: a pshufb index byte with its high bit set produces zero, so
# XORing a shuffle-index vector with this mask swaps which lanes are kept and
# which are zeroed (used to turn the left-shift index into the right-shift one).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) .Lmask1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	.octa	0x80808080808080808080808080808080
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) .section	.rodata.cst16.mask2, "aM", @progbits, 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) .align 16
# Keeps the low 96 bits and zeroes the high 32; used during the final reduction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) .Lmask2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 	.octa	0x00000000FFFFFFFFFFFFFFFFFFFFFFFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) .section	.rodata.cst16.bswap_mask, "aM", @progbits, 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) .align 16
# pshufb index vector that reverses the byte order of a 128-bit value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) .Lbswap_mask:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 	.octa	0x000102030405060708090A0B0C0D0E0F
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) .section	.rodata.cst32.byteshift_table, "aM", @progbits, 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) .align 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) # For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 - len]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) # is the index vector to shift left by 'len' bytes, and is also {0x80, ...,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) # 0x80} XOR the index vector to shift right by '16 - len' bytes.
#
# Only table offsets 1..30 are ever read (since 1 <= len <= 15), so the first
# and last bytes are padding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) .Lbyteshift_table:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	.byte		 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 	.byte		0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 	.byte		 0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	.byte		 0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe , 0x0