// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * Some ideas are from the atmel-aes.c driver.
 */

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/skcipher.h>
#include "mtk-platform.h"

#define AES_QUEUE_SIZE		512
#define AES_BUF_ORDER		2
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))
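
/*
 * Sizing note on AES_BUF_SIZE (added for clarity, not in the original
 * source): with AES_BUF_ORDER = 2 and a typical 4 KiB PAGE_SIZE, the bounce
 * buffer spans 16 KiB, masked down to a multiple of AES_BLOCK_SIZE
 * (16 bytes). Requests larger than this cannot use the bounce path.
 */
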
#define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
					      AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE		6

#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)

/* AES-CBC/ECB/CTR/OFB/CFB command token */
#define AES_CMD0		cpu_to_le32(0x05000000)
#define AES_CMD1		cpu_to_le32(0x2d060000)
#define AES_CMD2		cpu_to_le32(0xe4a63806)
/* AES-GCM command token */
#define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2		cpu_to_le32(0x25000010)
#define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6		cpu_to_le32(0xd0070000)
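
/*
 * Note (added for clarity): the constants above are opaque EIP97
 * instruction words. The driver ORs a byte count into the low bits of some
 * of them, e.g. AES_CMD0 | cpu_to_le32(len) in mtk_aes_info_init() and
 * AES_GCM_CMD0 | cpu_to_le32(req->assoclen) in mtk_aes_gcm_info_init().
 */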

/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
#define AES_TFM_OFB		cpu_to_le32(0x4 << 0)
#define AES_TFM_CFB128		cpu_to_le32(0x5 << 0)
#define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
#define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)
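
/*
 * Note (added for clarity): mtk_aes_info_init() builds tfm[0] from the
 * direction (AES_TFM_BASIC_OUT/IN), the state size (AES_TFM_SIZE) and the
 * key-length field, while tfm[1] selects the chaining mode and which IV
 * words the engine should load.
 */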

/* AES flags */
#define AES_FLAGS_CIPHER_MSK	GENMASK(4, 0)
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CTR		BIT(2)
#define AES_FLAGS_OFB		BIT(3)
#define AES_FLAGS_CFB128	BIT(4)
#define AES_FLAGS_GCM		BIT(5)
#define AES_FLAGS_ENCRYPT	BIT(6)
#define AES_FLAGS_BUSY		BIT(7)

#define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))

/**
 * struct mtk_aes_info - hardware information of AES
 * @cmd:	command token, hardware instruction.
 * @tfm:	transform state of cipher algorithm.
 * @state:	contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * | AES KEY   | 128/192/256 bits
 * |-----------|
 * | HASH KEY  | a string of 128 zero bits encrypted using the block cipher
 * |-----------|
 * | IVs       | 4 * 4 bytes
 * \-----------/
 *
 * The engine requires all of this information to:
 * - decode commands and control the engine's data path,
 * - coordinate hardware data fetch and store operations,
 * - construct and output the result token.
 */
struct mtk_aes_info {
	__le32 cmd[AES_MAX_CT_SIZE];
	__le32 tfm[2];
	__le32 state[AES_MAX_STATE_BUF_SIZE];
};

struct mtk_aes_reqctx {
	u64 mode;
};

struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;
	u32 keylen;
	__le32 key[12];
	__le32 keymode;

	mtk_aes_fn start;

	struct mtk_aes_info info;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	__le32 ct_hdr;
	u32 ct_size;
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx base;
};

struct mtk_aes_ctr_ctx {
	struct mtk_aes_base_ctx base;

	__be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t offset;
	struct scatterlist src[2];
	struct scatterlist dst[2];
};

struct mtk_aes_gcm_ctx {
	struct mtk_aes_base_ctx base;

	u32 authsize;
	size_t textlen;
};

struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};

static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}

static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_aes.lock);
	if (!ctx->cryp) {
		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
			cryp = tmp;
			break;
		}
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
	spin_unlock_bh(&mtk_aes.lock);

	return cryp;
}

static inline size_t mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}
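
/*
 * Worked example (added for clarity): for len = 20, len & 15 = 4, so
 * mtk_aes_padlen() returns 16 - 4 = 12 and the request is padded up to the
 * next AES block boundary (32 bytes). Block-aligned lengths return 0.
 */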

static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}
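
/*
 * Note (added for clarity): on success, mtk_aes_check_aligned() has trimmed
 * the final scatterlist entry so that exactly len bytes are covered, and
 * recorded the cut-off tail in dma->remainder; mtk_aes_restore_sg() below
 * undoes that trim once the transfer has been unmapped.
 */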

static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}

static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}

static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, err);
	/* Handle new request */
	tasklet_schedule(&aes->queue_task);
	return err;
}

/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_next;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		/* Shift ring buffer and check boundary */
		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
			ring->cmd_next = ring->cmd_base;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_next;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		/* Shift ring buffer and check boundary */
		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
			ring->res_next = ring->res_base;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Pointer to current result descriptor */
	ring->res_prev = res;

	/* Prepare enough space for the authentication tag */
	if (aes->flags & AES_FLAGS_GCM)
		le32_add_cpu(&res->hdr, AES_BLOCK_SIZE);

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start the engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}

static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}

static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		goto exit;

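	/*
	 * Note (added for clarity): tfm[] immediately follows cmd[] inside
	 * struct mtk_aes_info, so the transform state shares the single DMA
	 * mapping above and tfm_dma is just an offset into it.
	 */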
	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
	return mtk_aes_complete(cryp, aes, -EINVAL);
}

/* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD1;

	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
	if (aes->flags & AES_FLAGS_ENCRYPT)
		info->tfm[0] |= AES_TFM_BASIC_OUT;
	else
		info->tfm[0] |= AES_TFM_BASIC_IN;

	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
	case AES_FLAGS_CBC:
		info->tfm[1] = AES_TFM_CBC;
		break;
	case AES_FLAGS_ECB:
		info->tfm[1] = AES_TFM_ECB;
		goto ecb;
	case AES_FLAGS_CTR:
		info->tfm[1] = AES_TFM_CTR_LOAD;
		goto ctr;
	case AES_FLAGS_OFB:
		info->tfm[1] = AES_TFM_OFB;
		break;
	case AES_FLAGS_CFB128:
		info->tfm[1] = AES_TFM_CFB128;
		break;
	default:
		/* Should not happen... */
		return;
	}

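	/*
	 * Note (added for clarity): only CBC/OFB/CFB fall through to this
	 * copy. ECB jumps past the IV handling entirely, and CTR skips it
	 * because mtk_aes_ctr_transfer() writes the counter block into the
	 * state buffer itself before each transfer.
	 */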
	memcpy(info->state + ctx->keylen, req->iv, AES_BLOCK_SIZE);
ctr:
	le32_add_cpu(&info->tfm[0],
		     le32_to_cpu(AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE))));
	info->tfm[1] |= AES_TFM_FULL_IV;
	info->cmd[cnt++] = AES_CMD2;
ecb:
	ctx->ct_size = cnt;
}

static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}

static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);
	/* Write key into state buffer */
	memcpy(ctx->info.state, ctx->key, sizeof(ctx->key));

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}
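
/*
 * Note (added for clarity): when the record is busy, the request is only
 * queued and the return value of crypto_enqueue_request() (-EINPROGRESS,
 * or -EBUSY for backlogged requests) is propagated to the caller; the queue
 * tasklet scheduled by mtk_aes_complete() is expected to re-enter this
 * function with new_areq == NULL once the engine is free again.
 */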

static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
				     struct mtk_aes_rec *aes)
{
	return mtk_aes_complete(cryp, aes, 0);
}

static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);
	aes->resume = mtk_aes_transfer_complete;

	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->cryptlen);
}

static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}

static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	u32 start, end, ctr, blocks;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->cryptlen)
		return mtk_aes_transfer_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->cryptlen - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/* Check for 32-bit counter overflow. */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		ctr = 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	memcpy(ctx->info.state + ctx->keylen, cctx->iv, AES_BLOCK_SIZE);

	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}
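
/*
 * Worked example (added for clarity): with start = 0xfffffffe and four
 * blocks remaining, end wraps around, so the transfer is split: -start
 * taken as a u32 is 2, datalen becomes 2 * AES_BLOCK_SIZE = 32 bytes (the
 * blocks that fit before the wrap), and the IV is stepped past the overflow
 * in software before the remainder resumes through this function.
 */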

static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);

	memcpy(cctx->iv, req->iv, AES_BLOCK_SIZE);
	cctx->offset = 0;
	aes->total = 0;
	aes->resume = mtk_aes_ctr_transfer;

	return mtk_aes_ctr_transfer(cryp, aes);
}

/* Check the key length and store the AES key in the transform context */
static int mtk_aes_setkey(struct crypto_skcipher *tfm,
			  const u8 *key, u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);
	memcpy(ctx->key, key, keylen);

	return 0;
}

static int mtk_aes_crypt(struct skcipher_request *req, u64 mode)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct mtk_aes_reqctx *rctx;
	struct mtk_cryp *cryp;

	cryp = mtk_aes_find_dev(ctx);
	if (!cryp)
		return -ENODEV;

	rctx = skcipher_request_ctx(req);
	rctx->mode = mode;

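	/*
	 * Note (added for clarity): the ring id below evaluates to 0 for
	 * encryption and 1 for decryption, so the two directions are
	 * dispatched to separate descriptor rings.
	 */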
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) &req->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) static int mtk_aes_ecb_encrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) static int mtk_aes_ecb_decrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) return mtk_aes_crypt(req, AES_FLAGS_ECB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) static int mtk_aes_cbc_encrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) static int mtk_aes_cbc_decrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) return mtk_aes_crypt(req, AES_FLAGS_CBC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) static int mtk_aes_ctr_encrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) static int mtk_aes_ctr_decrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) return mtk_aes_crypt(req, AES_FLAGS_CTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) static int mtk_aes_ofb_encrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) static int mtk_aes_ofb_decrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) return mtk_aes_crypt(req, AES_FLAGS_OFB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) static int mtk_aes_cfb_encrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) static int mtk_aes_cfb_decrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) return mtk_aes_crypt(req, AES_FLAGS_CFB128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) static int mtk_aes_init_tfm(struct crypto_skcipher *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) ctx->base.start = mtk_aes_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) static int mtk_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) ctx->base.start = mtk_aes_ctr_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) static struct skcipher_alg aes_algs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) .base.cra_name = "cbc(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) .base.cra_driver_name = "cbc-aes-mtk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) .base.cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) .base.cra_flags = CRYPTO_ALG_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) .base.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) .base.cra_alignmask = 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) .base.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) .setkey = mtk_aes_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) .encrypt = mtk_aes_cbc_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) .decrypt = mtk_aes_cbc_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) .init = mtk_aes_init_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) .base.cra_name = "ecb(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) .base.cra_driver_name = "ecb-aes-mtk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) .base.cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) .base.cra_flags = CRYPTO_ALG_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) .base.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) .base.cra_alignmask = 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) .base.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) .setkey = mtk_aes_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) .encrypt = mtk_aes_ecb_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) .decrypt = mtk_aes_ecb_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) .init = mtk_aes_init_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) .base.cra_name = "ctr(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) .base.cra_driver_name = "ctr-aes-mtk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) .base.cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) .base.cra_flags = CRYPTO_ALG_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) .base.cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) .base.cra_alignmask = 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) .base.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) .setkey = mtk_aes_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) .encrypt = mtk_aes_ctr_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) .decrypt = mtk_aes_ctr_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) .init = mtk_aes_ctr_init_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) .base.cra_name = "ofb(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) .base.cra_driver_name = "ofb-aes-mtk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) .base.cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) .base.cra_flags = CRYPTO_ALG_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) .base.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) .base.cra_alignmask = 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) .base.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) .setkey = mtk_aes_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) .encrypt = mtk_aes_ofb_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) .decrypt = mtk_aes_ofb_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) .base.cra_name = "cfb(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) .base.cra_driver_name = "cfb-aes-mtk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) .base.cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) .base.cra_flags = CRYPTO_ALG_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) .base.cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) .base.cra_alignmask = 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) .base.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) .setkey = mtk_aes_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) .encrypt = mtk_aes_cfb_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) .decrypt = mtk_aes_cfb_decrypt,
.init = mtk_aes_init_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) static inline struct mtk_aes_gcm_ctx *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) return container_of(ctx, struct mtk_aes_gcm_ctx, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * The engine verifies and compares the tag automatically, so we only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * need to check the returned status stored in the result descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct mtk_aes_rec *aes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) __le32 status = cryp->ring[aes->id]->res_prev->ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
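/* -EBADMSG is the standard crypto API error for a failed tag check. */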
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) -EBADMSG : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) /* Initialize transform information of GCM mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct mtk_aes_rec *aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct aead_request *req = aead_request_cast(aes->areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) struct mtk_aes_base_ctx *ctx = aes->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct mtk_aes_info *info = &ctx->info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) u32 cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
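/*
 * Build the command token. Our reading of the opaque AES_GCM_CMD*
 * words, inferred from the lengths patched in below: hash the
 * associated data, en/decrypt gctx->textlen bytes of text, then
 * insert (encrypt) or verify (decrypt) the authentication tag.
 */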
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) info->cmd[cnt++] = AES_GCM_CMD2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (aes->flags & AES_FLAGS_ENCRYPT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) info->tfm[0] = AES_TFM_GCM_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) info->tfm[0] = AES_TFM_GCM_IN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) ctx->ct_size = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) AES_TFM_SIZE(ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) ctx->keymode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) AES_TFM_ENC_HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
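/*
 * Transform state layout implied by the offsets used here and in
 * mtk_aes_gcm_setkey() (all in 32-bit words):
 *
 *	state[0 .. keylen-1]	AES key
 *	state[keylen .. +3]	GHASH subkey H
 *	state[keylen+4 ..]	GCM IV
 */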
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) memcpy(info->state + ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) req->iv, ivsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) struct scatterlist *src, struct scatterlist *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) bool src_aligned, dst_aligned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) aes->src.sg = src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) aes->dst.sg = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) aes->real_dst = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (src == dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) dst_aligned = src_aligned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (!src_aligned || !dst_aligned) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (aes->total > AES_BUF_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return mtk_aes_complete(cryp, aes, -ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (!src_aligned) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) aes->src.sg = &aes->aligned_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) aes->src.nents = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) aes->src.remainder = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (!dst_aligned) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) aes->dst.sg = &aes->aligned_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) aes->dst.nents = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) aes->dst.remainder = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
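/*
 * Map the bounce buffer over aes->total rather than len, so that the
 * tag the engine appends on encryption (see mtk_aes_gcm_start()) still
 * lands inside the mapped region.
 */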
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) sg_init_table(&aes->aligned_sg, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) mtk_aes_gcm_info_init(cryp, aes, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return mtk_aes_map(cryp, aes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) /* TODO: GMAC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) struct aead_request *req = aead_request_cast(aes->areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) u32 len = req->assoclen + req->cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) mtk_aes_set_mode(aes, rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (aes->flags & AES_FLAGS_ENCRYPT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) u32 tag[4] = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) aes->resume = mtk_aes_transfer_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) /* Compute total process length. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) aes->total = len + gctx->authsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) /*
 * Hardware will append the authentication tag to the output buffer;
 * pre-write zeros there so no uninitialized stack data ends up in
 * the destination before the engine overwrites it.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) aes->resume = mtk_aes_gcm_tag_verify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) aes->total = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct mtk_cryp *cryp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) bool enc = !!(mode & AES_FLAGS_ENCRYPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) cryp = mtk_aes_find_dev(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (!cryp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) /* Compute text length. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize);
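/* (For decryption, req->cryptlen includes the authentication tag.) */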
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /* Empty messages are not supported yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (!gctx->textlen && !req->assoclen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) rctx->mode = AES_FLAGS_GCM | mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return mtk_aes_handle_queue(cryp, enc, &req->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * Due to a hardware limitation, we need to pre-compute the hash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * subkey H = E(K, 0^128) for the GHASH operation. The result of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * this encryption needs to be stored in the transform state buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) u32 keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) u32 x32[SIZE_IN_WORDS(AES_BLOCK_SIZE)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) u8 x8[AES_BLOCK_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) } hash = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct crypto_aes_ctx aes_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) switch (keylen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) case AES_KEYSIZE_128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) ctx->keymode = AES_TFM_128BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) case AES_KEYSIZE_192:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) ctx->keymode = AES_TFM_192BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) case AES_KEYSIZE_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) ctx->keymode = AES_TFM_256BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) ctx->keylen = SIZE_IN_WORDS(keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) err = aes_expandkey(&aes_ctx, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) aes_encrypt(&aes_ctx, hash.x8, hash.x8);
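/* hash.x8 now holds the GHASH subkey H = AES-K(0^128). */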
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) memzero_explicit(&aes_ctx, sizeof(aes_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) memcpy(ctx->key, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) /* The engine expects H with the bytes of each 32-bit word swapped. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) hash.x32[i] = swab32(hash.x32[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) memcpy(ctx->key + ctx->keylen, &hash, AES_BLOCK_SIZE);
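/* ctx->key now holds the AES key followed by the byte-swapped H. */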
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) u32 authsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) /* Same as crypto_gcm_authsize() from crypto/gcm.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) switch (authsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) case 12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) gctx->authsize = authsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) static int mtk_aes_gcm_encrypt(struct aead_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static int mtk_aes_gcm_decrypt(struct aead_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return mtk_aes_gcm_crypt(req, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static int mtk_aes_gcm_init(struct crypto_aead *aead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) ctx->base.start = mtk_aes_gcm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) static struct aead_alg aes_gcm_alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) .setkey = mtk_aes_gcm_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) .setauthsize = mtk_aes_gcm_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) .encrypt = mtk_aes_gcm_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) .decrypt = mtk_aes_gcm_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) .init = mtk_aes_gcm_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) .ivsize = GCM_AES_IV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) .maxauthsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) .cra_name = "gcm(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) .cra_driver_name = "gcm-aes-mtk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) .cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) .cra_flags = CRYPTO_ALG_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) .cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) .cra_ctxsize = sizeof(struct mtk_aes_gcm_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) .cra_alignmask = 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) };
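/*
 * Illustrative only: a kernel user reaches this implementation through
 * the generic AEAD API, e.g.
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_aead_setauthsize(tfm, 16);
 *
 * and the crypto core dispatches to the callbacks above whenever this
 * driver wins the priority comparison (cra_priority = 400).
 */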
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) static void mtk_aes_queue_task(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
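/*
 * Bottom half of mtk_aes_irq(): unmap the DMA buffers and hand control
 * to the resume callback (e.g. mtk_aes_transfer_complete or
 * mtk_aes_gcm_tag_verify).
 */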
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) static void mtk_aes_done_task(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct mtk_cryp *cryp = aes->cryp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) mtk_aes_unmap(cryp, aes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) aes->resume(cryp, aes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) struct mtk_cryp *cryp = aes->cryp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
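/* Ack the ring interrupt by writing the pending status bits back. */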
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) mtk_aes_write(cryp, RDR_STAT(aes->id), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (likely(AES_FLAGS_BUSY & aes->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) mtk_aes_write(cryp, RDR_THRESH(aes->id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) tasklet_schedule(&aes->done_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * The purpose of creating encryption and decryption records is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * to process outbound/inbound data in parallel, which improves
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * performance in most use cases, such as IPsec VPN, especially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * under heavy network traffic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) */
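/*
 * As an example of this split, mtk_aes_gcm_crypt() above passes the
 * direction flag straight in as the record index:
 *
 *	return mtk_aes_handle_queue(cryp, enc, &req->base);
 *
 * so encryption and decryption requests land on different rings.
 */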
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static int mtk_aes_record_init(struct mtk_cryp *cryp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct mtk_aes_rec **aes = cryp->aes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) int i, err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) for (i = 0; i < MTK_REC_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (!aes[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) goto err_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) AES_BUF_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (!aes[i]->buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) goto err_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) aes[i]->cryp = cryp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) spin_lock_init(&aes[i]->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) (unsigned long)aes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) (unsigned long)aes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) /* Link to ring0 and ring1 respectively */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) aes[0]->id = MTK_RING0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) aes[1]->id = MTK_RING1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) err_cleanup:
kfree(aes[i]);	/* record whose buffer allocation failed; NULL-safe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) for (; i--; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) free_pages((unsigned long)aes[i]->buf, AES_BUF_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) kfree(aes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static void mtk_aes_record_free(struct mtk_cryp *cryp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) for (i = 0; i < MTK_REC_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) tasklet_kill(&cryp->aes[i]->done_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) tasklet_kill(&cryp->aes[i]->queue_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) free_pages((unsigned long)cryp->aes[i]->buf, AES_BUF_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) kfree(cryp->aes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) static void mtk_aes_unregister_algs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) crypto_unregister_aead(&aes_gcm_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) crypto_unregister_skcipher(&aes_algs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) static int mtk_aes_register_algs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) err = crypto_register_skcipher(&aes_algs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) goto err_aes_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) err = crypto_register_aead(&aes_gcm_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) goto err_aes_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) err_aes_algs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) for (; i--; )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) crypto_unregister_skcipher(&aes_algs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) int mtk_cipher_alg_register(struct mtk_cryp *cryp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) INIT_LIST_HEAD(&cryp->aes_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) /* Initialize two cipher records */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) ret = mtk_aes_record_init(cryp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) goto err_record;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 0, "mtk-aes", cryp->aes[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) dev_err(cryp->dev, "unable to request AES irq.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) goto err_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 0, "mtk-aes", cryp->aes[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) dev_err(cryp->dev, "unable to request AES irq.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) goto err_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /* Enable ring0 and ring1 interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
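/*
 * Publish the device on mtk_aes.dev_list so mtk_aes_find_dev() can see
 * it before the algorithms are registered and become reachable.
 */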
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) spin_lock(&mtk_aes.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) spin_unlock(&mtk_aes.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) ret = mtk_aes_register_algs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) goto err_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) err_algs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) spin_lock(&mtk_aes.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) list_del(&cryp->aes_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) spin_unlock(&mtk_aes.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) err_res:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) mtk_aes_record_free(cryp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) err_record:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) dev_err(cryp->dev, "mtk-aes initialization failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) void mtk_cipher_alg_release(struct mtk_cryp *cryp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) spin_lock(&mtk_aes.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) list_del(&cryp->aes_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) spin_unlock(&mtk_aes.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) mtk_aes_unregister_algs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) mtk_aes_record_free(cryp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }