/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Shared glue code for 128bit block ciphers, AVX2 assembler macros
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 */

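/*
 * Load 16 consecutive 128-bit blocks (eight 256-bit registers, two
 * blocks each) from unaligned memory at src.
 */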
#define load_16way(src, x0, x1, x2, x3, x4, x5, x6, x7) \
	vmovdqu (0*32)(src), x0; \
	vmovdqu (1*32)(src), x1; \
	vmovdqu (2*32)(src), x2; \
	vmovdqu (3*32)(src), x3; \
	vmovdqu (4*32)(src), x4; \
	vmovdqu (5*32)(src), x5; \
	vmovdqu (6*32)(src), x6; \
	vmovdqu (7*32)(src), x7;

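/*
 * Store 16 blocks from eight 256-bit registers to unaligned memory at
 * dst.
 */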
#define store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vmovdqu x0, (0*32)(dst); \
	vmovdqu x1, (1*32)(dst); \
	vmovdqu x2, (2*32)(dst); \
	vmovdqu x3, (3*32)(dst); \
	vmovdqu x4, (4*32)(dst); \
	vmovdqu x5, (5*32)(dst); \
	vmovdqu x6, (6*32)(dst); \
	vmovdqu x7, (7*32)(dst);

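/*
 * CBC decryption output: xor each decrypted block with the preceding
 * ciphertext block (P[i] = D(C[i]) ^ C[i-1]).  t0 keeps a zeroed low
 * lane so the first block passes through unchanged; its xor with the
 * IV is left to the caller.
 */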
#define store_cbc_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7, t0) \
	vpxor t0, t0, t0; \
	vinserti128 $1, (src), t0, t0; \
	vpxor t0, x0, x0; \
	vpxor (0*32+16)(src), x1, x1; \
	vpxor (1*32+16)(src), x2, x2; \
	vpxor (2*32+16)(src), x3, x3; \
	vpxor (3*32+16)(src), x4, x4; \
	vpxor (4*32+16)(src), x5, x5; \
	vpxor (5*32+16)(src), x6, x6; \
	vpxor (6*32+16)(src), x7, x7; \
	store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);

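/*
 * Increment a little-endian 128-bit value in each 128-bit lane of x.
 * minus_one must hold { -1, 0 } (low/high qword) per lane: vpsubq then
 * adds 1 to the low qword, while vpcmpeqq flags lanes whose low qword
 * wraps so vpslldq/vpsubq can carry into the high qword.
 */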
#define inc_le128(x, minus_one, tmp) \
	vpcmpeqq minus_one, x, tmp; \
	vpsubq minus_one, x, x; \
	vpslldq $8, tmp, tmp; \
	vpsubq tmp, x, x;

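/*
 * Add 2 to a little-endian 128-bit value in each 128-bit lane of x.
 * The carry into the high qword is needed when the old low qword was
 * -1 or -2, hence the two compares or'ed together.
 */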
#define add2_le128(x, minus_one, minus_two, tmp1, tmp2) \
	vpcmpeqq minus_one, x, tmp1; \
	vpcmpeqq minus_two, x, tmp2; \
	vpsubq minus_two, x, x; \
	vpor tmp2, tmp1, tmp1; \
	vpslldq $8, tmp1, tmp1; \
	vpsubq tmp1, x, x;

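/*
 * Build 16 CTR counter blocks in x0..x7 from the little-endian counter
 * at (iv): increment with le128 arithmetic, then convert each block to
 * big-endian with the bswap shuffle mask.  The counter for the next 16
 * blocks is written back to (iv).
 */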
#define load_ctr_16way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t0x, t1, \
		       t1x, t2, t2x, t3, t3x, t4, t5) \
	vpcmpeqd t0, t0, t0; \
	vpsrldq $8, t0, t0; /* ab: -1:0 ; cd: -1:0 */ \
	vpaddq t0, t0, t4; /* ab: -2:0 ; cd: -2:0 */ \
	\
	/* load IV and byteswap */ \
	vmovdqu (iv), t2x; \
	vmovdqa t2x, t3x; \
	inc_le128(t2x, t0x, t1x); \
	vbroadcasti128 bswap, t1; \
	vinserti128 $1, t2x, t3, t2; /* ab: le0 ; cd: le1 */ \
	vpshufb t1, t2, x0; \
	\
	/* construct IVs */ \
	add2_le128(t2, t0, t4, t3, t5); /* ab: le2 ; cd: le3 */ \
	vpshufb t1, t2, x1; \
	add2_le128(t2, t0, t4, t3, t5); \
	vpshufb t1, t2, x2; \
	add2_le128(t2, t0, t4, t3, t5); \
	vpshufb t1, t2, x3; \
	add2_le128(t2, t0, t4, t3, t5); \
	vpshufb t1, t2, x4; \
	add2_le128(t2, t0, t4, t3, t5); \
	vpshufb t1, t2, x5; \
	add2_le128(t2, t0, t4, t3, t5); \
	vpshufb t1, t2, x6; \
	add2_le128(t2, t0, t4, t3, t5); \
	vpshufb t1, t2, x7; \
	vextracti128 $1, t2, t2x; \
	inc_le128(t2x, t0x, t3x); \
	vmovdqu t2x, (iv);

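/*
 * CTR output: xor the 16 encrypted counter blocks with the source
 * blocks and store the result to dst.
 */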
#define store_ctr_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vpxor (0*32)(src), x0, x0; \
	vpxor (1*32)(src), x1, x1; \
	vpxor (2*32)(src), x2, x2; \
	vpxor (3*32)(src), x3, x3; \
	vpxor (4*32)(src), x4, x4; \
	vpxor (5*32)(src), x5, x5; \
	vpxor (6*32)(src), x6, x6; \
	vpxor (7*32)(src), x7, x7; \
	store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);

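/*
 * Multiply each 128-bit lane of iv by x in GF(2^128) (little-endian
 * block representation, as used by XTS).  vpaddq doubles the qwords;
 * vpsrad/vpshufd broadcast the carry bits out of bit 127 and bit 63 so
 * that, and'ed with the caller-supplied mask (expected to hold the
 * qword pair { 0x87, 1 }), they xor in the reduction polynomial and
 * the low-to-high qword carry.
 */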
#define gf128mul_x_ble(iv, mask, tmp) \
	vpsrad $31, iv, tmp; \
	vpaddq iv, iv, iv; \
	vpshufd $0x13, tmp, tmp; \
	vpand mask, tmp, tmp; \
	vpxor tmp, iv, iv;

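/*
 * Multiply each 128-bit lane of iv by x^2 in GF(2^128) in one pass:
 * vpsllq shifts by two bits, tmp0 catches the carries out of bits
 * 127/63 of the original value (fixed up via mask2) and tmp1, derived
 * from the doubled value, catches the carries out of bits 126/62
 * (fixed up via mask1).
 */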
#define gf128mul_x2_ble(iv, mask1, mask2, tmp0, tmp1) \
	vpsrad $31, iv, tmp0; \
	vpaddq iv, iv, tmp1; \
	vpsllq $2, iv, iv; \
	vpshufd $0x13, tmp0, tmp0; \
	vpsrad $31, tmp1, tmp1; \
	vpand mask2, tmp0, tmp0; \
	vpshufd $0x13, tmp1, tmp1; \
	vpxor tmp0, iv, iv; \
	vpand mask1, tmp1, tmp1; \
	vpxor tmp1, iv, iv;

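/*
 * Compute 16 consecutive XTS tweaks starting from the tweak at (iv),
 * xor them into the source blocks and stash the tweaks themselves at
 * dst so that store_xts_16way can re-apply them after the cipher pass.
 * The tweak for the next 16 blocks is written back to (iv).
 */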
#define load_xts_16way(iv, src, dst, x0, x1, x2, x3, x4, x5, x6, x7, tiv, \
		       tivx, t0, t0x, t1, t1x, t2, t2x, t3, \
		       xts_gf128mul_and_shl1_mask_0, \
		       xts_gf128mul_and_shl1_mask_1) \
	vbroadcasti128 xts_gf128mul_and_shl1_mask_0, t1; \
	\
	/* load IV and construct second IV */ \
	vmovdqu (iv), tivx; \
	vmovdqa tivx, t0x; \
	gf128mul_x_ble(tivx, t1x, t2x); \
	vbroadcasti128 xts_gf128mul_and_shl1_mask_1, t2; \
	vinserti128 $1, tivx, t0, tiv; \
	vpxor (0*32)(src), tiv, x0; \
	vmovdqu tiv, (0*32)(dst); \
	\
	/* construct and store IVs, also xor with source */ \
	gf128mul_x2_ble(tiv, t1, t2, t0, t3); \
	vpxor (1*32)(src), tiv, x1; \
	vmovdqu tiv, (1*32)(dst); \
	\
	gf128mul_x2_ble(tiv, t1, t2, t0, t3); \
	vpxor (2*32)(src), tiv, x2; \
	vmovdqu tiv, (2*32)(dst); \
	\
	gf128mul_x2_ble(tiv, t1, t2, t0, t3); \
	vpxor (3*32)(src), tiv, x3; \
	vmovdqu tiv, (3*32)(dst); \
	\
	gf128mul_x2_ble(tiv, t1, t2, t0, t3); \
	vpxor (4*32)(src), tiv, x4; \
	vmovdqu tiv, (4*32)(dst); \
	\
	gf128mul_x2_ble(tiv, t1, t2, t0, t3); \
	vpxor (5*32)(src), tiv, x5; \
	vmovdqu tiv, (5*32)(dst); \
	\
	gf128mul_x2_ble(tiv, t1, t2, t0, t3); \
	vpxor (6*32)(src), tiv, x6; \
	vmovdqu tiv, (6*32)(dst); \
	\
	gf128mul_x2_ble(tiv, t1, t2, t0, t3); \
	vpxor (7*32)(src), tiv, x7; \
	vmovdqu tiv, (7*32)(dst); \
	\
	vextracti128 $1, tiv, tivx; \
	gf128mul_x_ble(tivx, t1x, t2x); \
	vmovdqu tivx, (iv);

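/*
 * XTS output: xor the cipher output with the tweaks stashed at dst by
 * load_xts_16way and store the final blocks over them.
 */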
#define store_xts_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vpxor (0*32)(dst), x0, x0; \
	vpxor (1*32)(dst), x1, x1; \
	vpxor (2*32)(dst), x2, x2; \
	vpxor (3*32)(dst), x3, x3; \
	vpxor (4*32)(dst), x4, x4; \
	vpxor (5*32)(dst), x5, x5; \
	vpxor (6*32)(dst), x6, x6; \
	vpxor (7*32)(dst), x7, x7; \
	store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);