Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards. The file shown below is drivers/crypto/padlock-aes.c, the VIA PadLock AES driver.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/fpu/api.h>

/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)

/* Control word. */
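/*
 * Field meanings, as inferred from how aes_set_key() programs them:
 * rounds is the AES round count (10/12/14), algo selects the cipher,
 * keygen = 1 means software supplies the expanded round keys (0 lets
 * the hardware expand a 128-bit key itself), interm would request
 * intermediate round results (unused by this driver), encdec = 1
 * selects decryption, and ksize encodes the key size (0/1/2 for
 * 128/192/256 bits).
 */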
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));

/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries, and
 * that the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter but the HW reads
 * more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};

static DEFINE_PER_CPU(struct cword *, paes_last_cword);

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	         as it's possible that the capability will be
	         added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

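/*
 * The crypto API only guarantees crypto_tfm_ctx_alignment(); when that
 * is weaker than PADLOCK_ALIGNMENT, round the context pointer up by
 * hand so E, d_data and the cwords keep their 16-byte alignment.
 */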
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm)
{
	return aes_ctx_common(crypto_skcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	struct crypto_aes_ctx gen_aes;
	int cpu;

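	/*
	 * The crypto core has already checked key_len against
	 * min/max_keysize (16..32); rejecting non-multiples of 8
	 * leaves exactly the legal AES sizes 16, 24 and 32.
	 */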
	if (key_len % 8)
		return -EINVAL;

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (aes_expandkey(&gen_aes, in_key, key_len))
		return -EINVAL;

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
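	/*
	 * The key behind these cwords changed: drop any cached
	 * pointers so padlock_reset_key() forces a reload on every
	 * CPU that last used this tfm.
	 */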
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
			per_cpu(paes_last_cword, cpu) = NULL;

	return 0;
}

static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
				unsigned int key_len)
{
	return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
}

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

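	/*
	 * Writing EFLAGS (pushf/popf) makes the xcrypt engine reload
	 * the key and control word on its next use, so only pay that
	 * cost when this CPU last ran with a different cword.
	 */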
	if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when CR0.TS is '1'.  Fortunately,
 * the kernel doesn't use CR0.TS.
 */

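/*
 * Register convention (encoded by the asm constraints below):
 * ESI = source, EDI = destination, EBX = key, EDX = control word,
 * ECX = block count; the CBC variant additionally takes the IV in
 * EAX and leaves the next IV there.  The instructions are emitted
 * as raw .byte opcodes since older assemblers lack the mnemonics.
 */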
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				 u8 *iv, struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}

static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}

static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
	 * We could avoid some copying here but it's probably not worth it.
	 */
	if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) {
		ecb_crypt_copy(in, out, key, cword, count);
		return;
	}

	rep_xcrypt_ecb(in, out, key, cword, count);
}

static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
{
	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
	if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE))
		return cbc_crypt_copy(in, out, key, iv, cword, count);

	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}

static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);

	if (count < ecb_fetch_blocks) {
		ecb_crypt(input, output, key, control_word, count);
		return;
	}

	count -= initial;

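	/*
	 * Do the sub-multiple remainder first: its over-fetch then
	 * lands on blocks that are processed immediately afterwards,
	 * while the main pass below is an exact multiple of the fetch
	 * size and so never reads past the end of the buffer.
	 */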
	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	u32 initial = count & (cbc_fetch_blocks - 1);

	if (count < cbc_fetch_blocks)
		return cbc_crypt(input, output, key, iv, control_word, count);

	count -= initial;

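	/* Same remainder-first split as the ECB path above. */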
	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
}

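/*
 * Single-block "aes" cipher.  PADLOCK_CRA_PRIORITY (defined in
 * <crypto/padlock.h>) outranks the generic C implementation, so the
 * crypto core prefers this driver whenever the hardware is present.
 */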
static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_module		=	THIS_MODULE,
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	padlock_aes_encrypt,
			.cia_decrypt		=	padlock_aes_decrypt,
		}
	}
};

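/*
 * skcipher paths: skcipher_walk_virt() hands back virtually mapped
 * chunks of the request; each chunk is fed to the xcrypt helpers a
 * whole number of blocks at a time, and any sub-block remainder is
 * returned to skcipher_walk_done() to be carried into the next
 * iteration.
 */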
static int ecb_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.decrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name		=	"ecb(aes)",
	.base.cra_driver_name	=	"ecb-aes-padlock",
	.base.cra_priority	=	PADLOCK_COMPOSITE_PRIORITY,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct aes_ctx),
	.base.cra_alignmask	=	PADLOCK_ALIGNMENT - 1,
	.base.cra_module	=	THIS_MODULE,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.setkey			=	aes_set_key_skcipher,
	.encrypt		=	ecb_aes_encrypt,
	.decrypt		=	ecb_aes_decrypt,
};

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name		=	"cbc(aes)",
	.base.cra_driver_name	=	"cbc-aes-padlock",
	.base.cra_priority	=	PADLOCK_COMPOSITE_PRIORITY,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct aes_ctx),
	.base.cra_alignmask	=	PADLOCK_ALIGNMENT - 1,
	.base.cra_module	=	THIS_MODULE,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	aes_set_key_skcipher,
	.encrypt		=	cbc_aes_encrypt,
	.decrypt		=	cbc_aes_decrypt,
};

static const struct x86_cpu_id padlock_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_XCRYPT, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);

static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!x86_match_cpu(padlock_cpu_id))
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)) != 0)
		goto aes_err;

	if ((ret = crypto_register_skcipher(&ecb_aes_alg)) != 0)
		goto ecb_aes_err;

	if ((ret = crypto_register_skcipher(&cbc_aes_alg)) != 0)
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

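	/*
	 * Family 6, model 15, stepping 2 is the VIA Nano with the
	 * prefetch erratum: widen the assumed fetch sizes so the
	 * page-boundary checks and chunking above stay safe.
	 */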
	if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_skcipher(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_skcipher(&cbc_aes_alg);
	crypto_unregister_skcipher(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS_CRYPTO("aes");
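
Usage sketch (not part of the driver): once the module is loaded on a PadLock-capable CPU, kernel code reaches it through the generic skcipher API, with no PadLock-specific calls; the function name and the all-zero key/IV below are illustrative only.

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical consumer: encrypt one block with cbc(aes); the crypto
 * core transparently picks cbc-aes-padlock when it is the
 * highest-priority provider of that algorithm. */
static int padlock_demo(void)
{
	static const u8 key[16];	/* all-zero demo key */
	u8 iv[16] = {};			/* all-zero demo IV */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf = NULL;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Scatterlists need linearly mapped memory, so kmalloc the data. */
	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out;
	}

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out:
	skcipher_request_free(req);
	kfree(buf);
	crypto_free_skcipher(tfm);
	return err;
}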