// SPDX-License-Identifier: GPL-2.0-only
/*
 * Accelerated GHASH implementation with ARMv8 PMULL instructions.
 *
 * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>

MODULE_DESCRIPTION("GHASH and AES-GCM using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ghash");

#define GHASH_BLOCK_SIZE	16
#define GHASH_DIGEST_SIZE	16
#define GCM_IV_SIZE		12

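/*
 * The GHASH key schedule: ->k retains the raw hash key H for the scalar
 * fallback, while ->h[] holds one or more powers of H (H^1 for the shash,
 * H^1..H^4 for GCM) pre-shifted by ghash_reflect() for the PMULL code.
 */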
struct ghash_key {
	be128			k;
	u64			h[][2];
};

struct ghash_desc_ctx {
	u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)];
	u8 buf[GHASH_BLOCK_SIZE];
	u32 count;
};

struct gcm_aes_ctx {
	struct crypto_aes_ctx	aes_key;
	struct ghash_key	ghash_key;
};

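/*
 * NEON helpers implemented in assembly: the p64 variants use the 64x64-bit
 * PMULL instruction from the Crypto Extensions, while pmull_ghash_update_p8
 * is a fallback built on the baseline 8-bit polynomial multiply.
 */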
asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
				       u64 const h[][2], const char *head);

asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
				      u64 const h[][2], const char *head);

asmlinkage void pmull_gcm_encrypt(int bytes, u8 dst[], const u8 src[],
				  u64 const h[][2], u64 dg[], u8 ctr[],
				  u32 const rk[], int rounds, u8 tag[]);
asmlinkage int pmull_gcm_decrypt(int bytes, u8 dst[], const u8 src[],
				 u64 const h[][2], u64 dg[], u8 ctr[],
				 u32 const rk[], int rounds, const u8 l[],
				 const u8 tag[], u64 authsize);

static int ghash_init(struct shash_desc *desc)
{
	struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);

	*ctx = (struct ghash_desc_ctx){};
	return 0;
}

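/*
 * Scalar fallback: fold 'blocks' 16-byte blocks into the digest by XOR'ing
 * each block in and multiplying by H in GF(2^128) (gf128mul_lle). An
 * optional 'head' block is processed first. dg[0] holds the low 64 digest
 * bits and dg[1] the high 64 bits.
 */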
static void ghash_do_update(int blocks, u64 dg[], const char *src,
			    struct ghash_key *key, const char *head)
{
	be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };

	do {
		const u8 *in = src;

		if (head) {
			in = head;
			blocks++;
			head = NULL;
		} else {
			src += GHASH_BLOCK_SIZE;
		}

		crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE);
		gf128mul_lle(&dst, &key->k);
	} while (--blocks);

	dg[0] = be64_to_cpu(dst.b);
	dg[1] = be64_to_cpu(dst.a);
}

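/*
 * Use the NEON routine if the FP/SIMD unit may be used in the current
 * context (crypto_simd_usable()), and fall back to the scalar GF(2^128)
 * multiply otherwise.
 */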
static __always_inline
void ghash_do_simd_update(int blocks, u64 dg[], const char *src,
			  struct ghash_key *key, const char *head,
			  void (*simd_update)(int blocks, u64 dg[],
					      const char *src,
					      u64 const h[][2],
					      const char *head))
{
	if (likely(crypto_simd_usable())) {
		kernel_neon_begin();
		simd_update(blocks, dg, src, key->h, head);
		kernel_neon_end();
	} else {
		ghash_do_update(blocks, dg, src, key, head);
	}
}

/* avoid hogging the CPU for too long */
#define MAX_BLOCKS	(SZ_64K / GHASH_BLOCK_SIZE)

static int ghash_update(struct shash_desc *desc, const u8 *src,
			unsigned int len)
{
	struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
	unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;

	ctx->count += len;

	if ((partial + len) >= GHASH_BLOCK_SIZE) {
		struct ghash_key *key = crypto_shash_ctx(desc->tfm);
		int blocks;

		if (partial) {
			int p = GHASH_BLOCK_SIZE - partial;

			memcpy(ctx->buf + partial, src, p);
			src += p;
			len -= p;
		}

		blocks = len / GHASH_BLOCK_SIZE;
		len %= GHASH_BLOCK_SIZE;

		do {
			int chunk = min(blocks, MAX_BLOCKS);

			ghash_do_simd_update(chunk, ctx->digest, src, key,
					     partial ? ctx->buf : NULL,
					     pmull_ghash_update_p8);

			blocks -= chunk;
			src += chunk * GHASH_BLOCK_SIZE;
			partial = 0;
		} while (unlikely(blocks > 0));
	}
	if (len)
		memcpy(ctx->buf + partial, src, len);
	return 0;
}

static int ghash_final(struct shash_desc *desc, u8 *dst)
{
	struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
	unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;

	if (partial) {
		struct ghash_key *key = crypto_shash_ctx(desc->tfm);

		memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);

		ghash_do_simd_update(1, ctx->digest, ctx->buf, key, NULL,
				     pmull_ghash_update_p8);
	}
	put_unaligned_be64(ctx->digest[1], dst);
	put_unaligned_be64(ctx->digest[0], dst + 8);

	*ctx = (struct ghash_desc_ctx){};
	return 0;
}

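/*
 * Pre-process the hash key for the PMULL routines: shift H left by one
 * bit across both 64-bit halves (i.e., multiply by x), folding the carry
 * out of the top bit back in via the reflected GHASH reduction polynomial
 * x^128 + x^7 + x^2 + x + 1, whose reflected high half is the 0xc2...
 * constant below.
 */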
static void ghash_reflect(u64 h[], const be128 *k)
{
	u64 carry = be64_to_cpu(k->a) & BIT(63) ? 1 : 0;

	h[0] = (be64_to_cpu(k->b) << 1) | carry;
	h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63);

	if (carry)
		h[1] ^= 0xc200000000000000UL;
}

static int ghash_setkey(struct crypto_shash *tfm,
			const u8 *inkey, unsigned int keylen)
{
	struct ghash_key *key = crypto_shash_ctx(tfm);

	if (keylen != GHASH_BLOCK_SIZE)
		return -EINVAL;

	/* needed for the fallback */
	memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);

	ghash_reflect(key->h[0], &key->k);
	return 0;
}

static struct shash_alg ghash_alg = {
	.base.cra_name		= "ghash",
	.base.cra_driver_name	= "ghash-neon",
	.base.cra_priority	= 150,
	.base.cra_blocksize	= GHASH_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct ghash_key) + sizeof(u64[2]),
	.base.cra_module	= THIS_MODULE,

	.digestsize		= GHASH_DIGEST_SIZE,
	.init			= ghash_init,
	.update			= ghash_update,
	.final			= ghash_final,
	.setkey			= ghash_setkey,
	.descsize		= sizeof(struct ghash_desc_ctx),
};

static int num_rounds(struct crypto_aes_ctx *ctx)
{
	/*
	 * # of rounds specified by AES:
	 * 128 bit key		10 rounds
	 * 192 bit key		12 rounds
	 * 256 bit key		14 rounds
	 * => n byte key	=> 6 + (n/4) rounds
	 */
	return 6 + ctx->key_length / 4;
}

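/*
 * Expand the AES key and derive the hash key H = AES-K(0^128) as per the
 * GCM spec, then precompute H^1..H^4 so the NEON code can aggregate
 * several blocks per reduction.
 */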
static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
		      unsigned int keylen)
{
	struct gcm_aes_ctx *ctx = crypto_aead_ctx(tfm);
	u8 key[GHASH_BLOCK_SIZE];
	be128 h;
	int ret;

	ret = aes_expandkey(&ctx->aes_key, inkey, keylen);
	if (ret)
		return -EINVAL;

	aes_encrypt(&ctx->aes_key, key, (u8[AES_BLOCK_SIZE]){});

	/* needed for the fallback */
	memcpy(&ctx->ghash_key.k, key, GHASH_BLOCK_SIZE);

	ghash_reflect(ctx->ghash_key.h[0], &ctx->ghash_key.k);

	h = ctx->ghash_key.k;
	gf128mul_lle(&h, &ctx->ghash_key.k);
	ghash_reflect(ctx->ghash_key.h[1], &h);

	gf128mul_lle(&h, &ctx->ghash_key.k);
	ghash_reflect(ctx->ghash_key.h[2], &h);

	gf128mul_lle(&h, &ctx->ghash_key.k);
	ghash_reflect(ctx->ghash_key.h[3], &h);

	return 0;
}

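/* GCM permits full 12..16 byte tags as well as truncated 4 and 8 byte ones */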
static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12 ... 16:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

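/*
 * Fold 'count' bytes of associated data into the GHASH state, buffering
 * partial blocks in buf[] so that only whole 16-byte blocks are passed to
 * the (SIMD or scalar) GHASH update.
 */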
static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[],
			   int *buf_count, struct gcm_aes_ctx *ctx)
{
	if (*buf_count > 0) {
		int buf_added = min(count, GHASH_BLOCK_SIZE - *buf_count);

		memcpy(&buf[*buf_count], src, buf_added);

		*buf_count += buf_added;
		src += buf_added;
		count -= buf_added;
	}

	if (count >= GHASH_BLOCK_SIZE || *buf_count == GHASH_BLOCK_SIZE) {
		int blocks = count / GHASH_BLOCK_SIZE;

		ghash_do_simd_update(blocks, dg, src, &ctx->ghash_key,
				     *buf_count ? buf : NULL,
				     pmull_ghash_update_p64);

		src += blocks * GHASH_BLOCK_SIZE;
		count %= GHASH_BLOCK_SIZE;
		*buf_count = 0;
	}

	if (count > 0) {
		memcpy(buf, src, count);
		*buf_count = count;
	}
}

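/*
 * Hash the associated data: walk the AAD scatterlist, folding each mapped
 * chunk into the GHASH state, and zero-pad the final partial block as
 * required by GCM.
 */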
static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[])
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
	u8 buf[GHASH_BLOCK_SIZE];
	struct scatter_walk walk;
	u32 len = req->assoclen;
	int buf_count = 0;

	scatterwalk_start(&walk, req->src);

	do {
		u32 n = scatterwalk_clamp(&walk, len);
		u8 *p;

		if (!n) {
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		p = scatterwalk_map(&walk);

		gcm_update_mac(dg, p, n, buf, &buf_count, ctx);
		len -= n;

		scatterwalk_unmap(p);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, 0, len);
	} while (len);

	if (buf_count) {
		memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count);
		ghash_do_simd_update(1, dg, buf, &ctx->ghash_key, NULL,
				     pmull_ghash_update_p64);
	}
}

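/*
 * GCM encryption: hash the AAD, then CTR-encrypt the plaintext starting
 * at counter value 2 (counter value 1 is reserved for the tag). On the
 * SIMD path the assembly handles CTR, GHASH and tag generation in one go,
 * with sub-block tails bounced through a stack buffer; the scalar path
 * reproduces the same flow with aes_encrypt()/gf128mul_lle().
 */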
static int gcm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
	int nrounds = num_rounds(&ctx->aes_key);
	struct skcipher_walk walk;
	u8 buf[AES_BLOCK_SIZE];
	u8 iv[AES_BLOCK_SIZE];
	u64 dg[2] = {};
	be128 lengths;
	u8 *tag;
	int err;

	lengths.a = cpu_to_be64(req->assoclen * 8);
	lengths.b = cpu_to_be64(req->cryptlen * 8);

	if (req->assoclen)
		gcm_calculate_auth_mac(req, dg);

	memcpy(iv, req->iv, GCM_IV_SIZE);
	put_unaligned_be32(2, iv + GCM_IV_SIZE);

	err = skcipher_walk_aead_encrypt(&walk, req, false);

	if (likely(crypto_simd_usable())) {
		do {
			const u8 *src = walk.src.virt.addr;
			u8 *dst = walk.dst.virt.addr;
			int nbytes = walk.nbytes;

			tag = (u8 *)&lengths;

			if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE)) {
				src = dst = memcpy(buf + sizeof(buf) - nbytes,
						   src, nbytes);
			} else if (nbytes < walk.total) {
				nbytes &= ~(AES_BLOCK_SIZE - 1);
				tag = NULL;
			}

			kernel_neon_begin();
			pmull_gcm_encrypt(nbytes, dst, src, ctx->ghash_key.h,
					  dg, iv, ctx->aes_key.key_enc, nrounds,
					  tag);
			kernel_neon_end();

			if (unlikely(!nbytes))
				break;

			if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
				memcpy(walk.dst.virt.addr,
				       buf + sizeof(buf) - nbytes, nbytes);

			err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
		} while (walk.nbytes);
	} else {
		while (walk.nbytes >= AES_BLOCK_SIZE) {
			int blocks = walk.nbytes / AES_BLOCK_SIZE;
			const u8 *src = walk.src.virt.addr;
			u8 *dst = walk.dst.virt.addr;
			int remaining = blocks;

			do {
				aes_encrypt(&ctx->aes_key, buf, iv);
				crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
				crypto_inc(iv, AES_BLOCK_SIZE);

				dst += AES_BLOCK_SIZE;
				src += AES_BLOCK_SIZE;
			} while (--remaining > 0);

			ghash_do_update(blocks, dg, walk.dst.virt.addr,
					&ctx->ghash_key, NULL);

			err = skcipher_walk_done(&walk,
						 walk.nbytes % AES_BLOCK_SIZE);
		}

		/* handle the tail */
		if (walk.nbytes) {
			aes_encrypt(&ctx->aes_key, buf, iv);

			crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr,
				       buf, walk.nbytes);

			memcpy(buf, walk.dst.virt.addr, walk.nbytes);
			memset(buf + walk.nbytes, 0, sizeof(buf) - walk.nbytes);
		}

		tag = (u8 *)&lengths;
		ghash_do_update(1, dg, tag, &ctx->ghash_key,
				walk.nbytes ? buf : NULL);

		if (walk.nbytes)
			err = skcipher_walk_done(&walk, 0);

		put_unaligned_be64(dg[1], tag);
		put_unaligned_be64(dg[0], tag + 8);
		put_unaligned_be32(1, iv + GCM_IV_SIZE);
		aes_encrypt(&ctx->aes_key, iv, iv);
		crypto_xor(tag, iv, AES_BLOCK_SIZE);
	}

	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(tag, req->dst, req->assoclen + req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}

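/*
 * GCM decryption mirrors gcm_encrypt(), except that the expected tag is
 * read from the end of the source first: the SIMD assembly verifies it
 * internally and returns nonzero on mismatch, while the scalar path
 * recomputes the tag and compares it in constant time via crypto_memneq().
 */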
static int gcm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	int nrounds = num_rounds(&ctx->aes_key);
	struct skcipher_walk walk;
	u8 otag[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u8 iv[AES_BLOCK_SIZE];
	u64 dg[2] = {};
	be128 lengths;
	u8 *tag;
	int err;

	lengths.a = cpu_to_be64(req->assoclen * 8);
	lengths.b = cpu_to_be64((req->cryptlen - authsize) * 8);

	if (req->assoclen)
		gcm_calculate_auth_mac(req, dg);

	memcpy(iv, req->iv, GCM_IV_SIZE);
	put_unaligned_be32(2, iv + GCM_IV_SIZE);

	scatterwalk_map_and_copy(otag, req->src,
				 req->assoclen + req->cryptlen - authsize,
				 authsize, 0);

	err = skcipher_walk_aead_decrypt(&walk, req, false);

	if (likely(crypto_simd_usable())) {
		int ret;

		do {
			const u8 *src = walk.src.virt.addr;
			u8 *dst = walk.dst.virt.addr;
			int nbytes = walk.nbytes;

			tag = (u8 *)&lengths;

			if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE)) {
				src = dst = memcpy(buf + sizeof(buf) - nbytes,
						   src, nbytes);
			} else if (nbytes < walk.total) {
				nbytes &= ~(AES_BLOCK_SIZE - 1);
				tag = NULL;
			}

			kernel_neon_begin();
			ret = pmull_gcm_decrypt(nbytes, dst, src,
						ctx->ghash_key.h,
						dg, iv, ctx->aes_key.key_enc,
						nrounds, tag, otag, authsize);
			kernel_neon_end();

			if (unlikely(!nbytes))
				break;

			if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
				memcpy(walk.dst.virt.addr,
				       buf + sizeof(buf) - nbytes, nbytes);

			err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
		} while (walk.nbytes);

		if (err)
			return err;
		if (ret)
			return -EBADMSG;
	} else {
		while (walk.nbytes >= AES_BLOCK_SIZE) {
			int blocks = walk.nbytes / AES_BLOCK_SIZE;
			const u8 *src = walk.src.virt.addr;
			u8 *dst = walk.dst.virt.addr;

			ghash_do_update(blocks, dg, walk.src.virt.addr,
					&ctx->ghash_key, NULL);

			do {
				aes_encrypt(&ctx->aes_key, buf, iv);
				crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
				crypto_inc(iv, AES_BLOCK_SIZE);

				dst += AES_BLOCK_SIZE;
				src += AES_BLOCK_SIZE;
			} while (--blocks > 0);

			err = skcipher_walk_done(&walk,
						 walk.nbytes % AES_BLOCK_SIZE);
		}

		/* handle the tail */
		if (walk.nbytes) {
			memcpy(buf, walk.src.virt.addr, walk.nbytes);
			memset(buf + walk.nbytes, 0, sizeof(buf) - walk.nbytes);
		}

		tag = (u8 *)&lengths;
		ghash_do_update(1, dg, tag, &ctx->ghash_key,
				walk.nbytes ? buf : NULL);

		if (walk.nbytes) {
			aes_encrypt(&ctx->aes_key, buf, iv);

			crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr,
				       buf, walk.nbytes);

			err = skcipher_walk_done(&walk, 0);
		}

		if (err)
			return err;

		put_unaligned_be64(dg[1], tag);
		put_unaligned_be64(dg[0], tag + 8);
		put_unaligned_be32(1, iv + GCM_IV_SIZE);
		aes_encrypt(&ctx->aes_key, iv, iv);
		crypto_xor(tag, iv, AES_BLOCK_SIZE);

		if (crypto_memneq(tag, otag, authsize)) {
			memzero_explicit(tag, AES_BLOCK_SIZE);
			return -EBADMSG;
		}
	}
	return 0;
}

static struct aead_alg gcm_aes_alg = {
	.ivsize			= GCM_IV_SIZE,
	.chunksize		= AES_BLOCK_SIZE,
	.maxauthsize		= AES_BLOCK_SIZE,
	.setkey			= gcm_setkey,
	.setauthsize		= gcm_setauthsize,
	.encrypt		= gcm_encrypt,
	.decrypt		= gcm_decrypt,

	.base.cra_name		= "gcm(aes)",
	.base.cra_driver_name	= "gcm-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct gcm_aes_ctx) +
				  4 * sizeof(u64[2]),
	.base.cra_module	= THIS_MODULE,
};

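/*
 * Register the full GCM AEAD only when the PMULL instruction is present;
 * CPUs with plain ASIMD get the standalone GHASH shash, which uses the
 * slower 8-bit polynomial multiply.
 */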
static int __init ghash_ce_mod_init(void)
{
	if (!cpu_have_named_feature(ASIMD))
		return -ENODEV;

	if (cpu_have_named_feature(PMULL))
		return crypto_register_aead(&gcm_aes_alg);

	return crypto_register_shash(&ghash_alg);
}

static void __exit ghash_ce_mod_exit(void)
{
	if (cpu_have_named_feature(PMULL))
		crypto_unregister_aead(&gcm_aes_alg);
	else
		crypto_unregister_shash(&ghash_alg);
}

static const struct cpu_feature ghash_cpu_feature[] = {
	{ cpu_feature(PMULL) }, { }
};
MODULE_DEVICE_TABLE(cpu, ghash_cpu_feature);

module_init(ghash_ce_mod_init);
module_exit(ghash_ce_mod_exit);