^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Cryptographic API.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Driver for EIP97 SHA1/SHA2(HMAC) acceleration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Some ideas are from atmel-sha.c and omap-sham.c drivers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <crypto/hmac.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <crypto/sha.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include "mtk-platform.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #define SHA_ALIGN_MSK (sizeof(u32) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #define SHA_QUEUE_SIZE 512
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #define SHA_BUF_SIZE ((u32)PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #define SHA_OP_UPDATE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #define SHA_OP_FINAL 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #define SHA_DATA_LEN_MSK cpu_to_le32(GENMASK(16, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #define SHA_MAX_DIGEST_BUF_SIZE 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) /* SHA command token */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define SHA_CT_SIZE 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define SHA_CT_CTRL_HDR cpu_to_le32(0x02220000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define SHA_CMD0 cpu_to_le32(0x03020000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define SHA_CMD1 cpu_to_le32(0x21060000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define SHA_CMD2 cpu_to_le32(0xe0e63802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) /* SHA transform information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define SHA_TFM_HASH cpu_to_le32(0x2 << 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define SHA_TFM_SIZE(x) cpu_to_le32((x) << 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define SHA_TFM_START cpu_to_le32(0x1 << 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define SHA_TFM_CONTINUE cpu_to_le32(0x1 << 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define SHA_TFM_HASH_STORE cpu_to_le32(0x1 << 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define SHA_TFM_SHA1 cpu_to_le32(0x2 << 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define SHA_TFM_SHA256 cpu_to_le32(0x3 << 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define SHA_TFM_SHA224 cpu_to_le32(0x4 << 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define SHA_TFM_SHA512 cpu_to_le32(0x5 << 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define SHA_TFM_SHA384 cpu_to_le32(0x6 << 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define SHA_TFM_DIGEST(x) cpu_to_le32(((x) & GENMASK(3, 0)) << 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) /* SHA flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define SHA_FLAGS_BUSY BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define SHA_FLAGS_FINAL BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define SHA_FLAGS_FINUP BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define SHA_FLAGS_SG BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define SHA_FLAGS_ALGO_MSK GENMASK(8, 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define SHA_FLAGS_SHA1 BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define SHA_FLAGS_SHA224 BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define SHA_FLAGS_SHA256 BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define SHA_FLAGS_SHA384 BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define SHA_FLAGS_SHA512 BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define SHA_FLAGS_HMAC BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define SHA_FLAGS_PAD BIT(10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
/**
 * struct mtk_sha_info - hardware descriptor of a SHA transform
 * @ctrl:	transform control words; built from @tfm plus the
 *		start/continue bits (see mtk_sha_info_init())
 * @cmd:	command token, hardware instruction
 * @tfm:	transform state of the hash algorithm
 * @digest:	digest words maintained by the engine (HASH_STORE is set,
 *		so the engine writes intermediate state back here)
 */
struct mtk_sha_info {
	__le32 ctrl[2];
	__le32 cmd[3];
	__le32 tfm[2];
	__le32 digest[SHA_MAX_DIGEST_BUF_SIZE];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
/* Per-request state for one hash operation. */
struct mtk_sha_reqctx {
	struct mtk_sha_info info;	/* DMA-mapped hardware descriptor */
	unsigned long flags;		/* SHA_FLAGS_* state bits */
	unsigned long op;		/* SHA_OP_UPDATE or SHA_OP_FINAL */

	u64 digcnt;			/* bytes already submitted to hardware */
	size_t bufcnt;			/* bytes currently staged in @buffer */
	dma_addr_t dma_addr;		/* DMA address of @buffer */

	__le32 ct_hdr;			/* command token header (holds data length) */
	u32 ct_size;			/* command token size (SHA_CT_SIZE words) */
	dma_addr_t ct_dma;		/* DMA address of @info */
	dma_addr_t tfm_dma;		/* DMA address of @info.tfm (ct_dma + offset) */

	/* Walk state */
	struct scatterlist *sg;
	u32 offset;	/* Offset in current sg */
	u32 total;	/* Total request */
	size_t ds;	/* digest size in bytes */
	size_t bs;	/* block size in bytes */

	u8 *buffer;	/* staging buffer (points at mtk_sha_ctx::buf) */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
/*
 * HMAC state: fallback shash used to finish the outer hash, plus the
 * inner/outer key pads. Pads are sized for the largest supported block
 * (SHA-512); presumably filled by setkey (not visible in this chunk) —
 * verify against the setkey implementation.
 */
struct mtk_sha_hmac_ctx {
	struct crypto_shash *shash;
	u8 ipad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
	u8 opad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
/* Per-tfm (transform) context. */
struct mtk_sha_ctx {
	struct mtk_cryp *cryp;		/* device this tfm is bound to */
	unsigned long flags;		/* SHA_FLAGS_HMAC for HMAC variants */
	u8 id;				/* ring/record id, assigned round-robin */
	u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32));	/* staging buffer */

	/* Trailing HMAC state; only allocated for HMAC algorithms. */
	struct mtk_sha_hmac_ctx base[];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
/* Driver-global bookkeeping: all probed devices that provide SHA rings. */
struct mtk_sha_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

/* Singleton instance of the driver-global state. */
static struct mtk_sha_drv mtk_sha = {
	.dev_list = LIST_HEAD_INIT(mtk_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_sha.lock),
};

/* Forward declaration; defined after the request-processing helpers. */
static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct ahash_request *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
/* Read a device register (relaxed: no ordering vs. DMA implied). */
static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
/*
 * Write a device register (relaxed: callers needing ordering against
 * descriptor memory must fence explicitly, see the wmb() in mtk_sha_xmit()).
 */
static inline void mtk_sha_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) static inline void mtk_sha_ring_shift(struct mtk_ring *ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) struct mtk_desc **cmd_curr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) struct mtk_desc **res_curr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) int *count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) *cmd_curr = ring->cmd_next++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) *res_curr = ring->res_next++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) (*count)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) ring->cmd_next = ring->cmd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) ring->res_next = ring->res_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) struct mtk_cryp *cryp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) struct mtk_cryp *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) spin_lock_bh(&mtk_sha.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) if (!tctx->cryp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) list_for_each_entry(tmp, &mtk_sha.dev_list, sha_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) cryp = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) tctx->cryp = cryp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) cryp = tctx->cryp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) * Assign record id to tfm in round-robin fashion, and this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) * will help tfm to bind to corresponding descriptor rings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) tctx->id = cryp->rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) cryp->rec = !cryp->rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) spin_unlock_bh(&mtk_sha.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) return cryp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) size_t count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) count = min(ctx->sg->length - ctx->offset, ctx->total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) count = min(count, SHA_BUF_SIZE - ctx->bufcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) if (count <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) * Check if count <= 0 because the buffer is full or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) * because the sg length is 0. In the latest case,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) * check if there is another sg in the list, a 0 length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) * sg doesn't necessarily mean the end of the sg list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) ctx->sg = sg_next(ctx->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) ctx->offset, count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) ctx->bufcnt += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) ctx->offset += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) ctx->total -= count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) if (ctx->offset == ctx->sg->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) ctx->sg = sg_next(ctx->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) if (ctx->sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) ctx->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) ctx->total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message followed by
 * "padlen-1" zero bits. Then a 64 bit block (SHA1/SHA224/SHA256) or
 * 128 bit block (SHA384/SHA512) equal to the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 * - if message length < 56 bytes then padlen = 56 - message length
 * - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 * - if message length < 112 bytes then padlen = 112 - message length
 * - else padlen = 128 + 112 - message length
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) u32 index, padlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) __be64 bits[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) u64 size = ctx->digcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) size += ctx->bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) size += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) bits[1] = cpu_to_be64(size << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) bits[0] = cpu_to_be64(size >> 61);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) case SHA_FLAGS_SHA384:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) case SHA_FLAGS_SHA512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) index = ctx->bufcnt & 0x7f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) *(ctx->buffer + ctx->bufcnt) = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) ctx->bufcnt += padlen + 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) ctx->flags |= SHA_FLAGS_PAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) index = ctx->bufcnt & 0x3f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) *(ctx->buffer + ctx->bufcnt) = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) ctx->bufcnt += padlen + 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) ctx->flags |= SHA_FLAGS_PAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) /* Initialize basic transform information of SHA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) struct mtk_sha_info *info = &ctx->info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) ctx->ct_hdr = SHA_CT_CTRL_HDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) ctx->ct_size = SHA_CT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) case SHA_FLAGS_SHA1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) info->tfm[0] |= SHA_TFM_SHA1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) case SHA_FLAGS_SHA224:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) info->tfm[0] |= SHA_TFM_SHA224;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) case SHA_FLAGS_SHA256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) info->tfm[0] |= SHA_TFM_SHA256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) case SHA_FLAGS_SHA384:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) info->tfm[0] |= SHA_TFM_SHA384;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) case SHA_FLAGS_SHA512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) info->tfm[0] |= SHA_TFM_SHA512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) /* Should not happen... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) info->tfm[1] = SHA_TFM_HASH_STORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) info->ctrl[1] = info->tfm[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) info->cmd[0] = SHA_CMD0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) info->cmd[1] = SHA_CMD1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) * Update input data length field of transform information and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) * map it to DMA region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) static int mtk_sha_info_update(struct mtk_cryp *cryp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) struct mtk_sha_rec *sha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) size_t len1, size_t len2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) struct mtk_sha_info *info = &ctx->info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) ctx->ct_hdr |= cpu_to_le32(len1 + len2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) info->cmd[0] &= ~SHA_DATA_LEN_MSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) info->cmd[0] |= cpu_to_le32(len1 + len2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) /* Setting SHA_TFM_START only for the first iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) if (ctx->digcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) info->ctrl[0] &= ~SHA_TFM_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) ctx->digcnt += len1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) * Because of hardware limitation, we must pre-calculate the inner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) * and outer digest that need to be processed firstly by engine, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * apply the result digest to the input message. These complex hashing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) * procedures limits HMAC performance, so we use fallback SW encoding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) static int mtk_sha_finish_hmac(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) struct mtk_sha_hmac_ctx *bctx = tctx->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) SHASH_DESC_ON_STACK(shash, bctx->shash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) shash->tfm = bctx->shash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) return crypto_shash_init(shash) ?:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) crypto_shash_finup(shash, req->result, ctx->ds, req->result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) /* Initialize request context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) static int mtk_sha_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) ctx->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) ctx->ds = crypto_ahash_digestsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) switch (ctx->ds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) case SHA1_DIGEST_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) ctx->flags |= SHA_FLAGS_SHA1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) ctx->bs = SHA1_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) case SHA224_DIGEST_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) ctx->flags |= SHA_FLAGS_SHA224;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) ctx->bs = SHA224_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) case SHA256_DIGEST_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) ctx->flags |= SHA_FLAGS_SHA256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) ctx->bs = SHA256_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) case SHA384_DIGEST_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) ctx->flags |= SHA_FLAGS_SHA384;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) ctx->bs = SHA384_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) case SHA512_DIGEST_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) ctx->flags |= SHA_FLAGS_SHA512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) ctx->bs = SHA512_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) ctx->bufcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) ctx->digcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) ctx->buffer = tctx->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) if (tctx->flags & SHA_FLAGS_HMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) struct mtk_sha_hmac_ctx *bctx = tctx->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) memcpy(ctx->buffer, bctx->ipad, ctx->bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) ctx->bufcnt = ctx->bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) ctx->flags |= SHA_FLAGS_HMAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
/*
 * Submit one or two DMA buffers to the hash engine.
 *
 * Fills command/result descriptors on the tfm's ring (first descriptor
 * carries the command token and transform pointers, an optional second
 * one carries extra data), then kicks the engine by bumping the
 * prepared-descriptor counts. Returns -EINPROGRESS on success;
 * completion is reported asynchronously.
 */
static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
			dma_addr_t addr1, size_t len1,
			dma_addr_t addr2, size_t len2)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	struct mtk_ring *ring = cryp->ring[sha->id];
	struct mtk_desc *cmd, *res;
	int err, count = 0;

	/* Refresh the token's data length and DMA-map the info block. */
	err = mtk_sha_info_update(cryp, sha, len1, len2);
	if (err)
		return err;

	/* Fill in the command/result descriptors */
	mtk_sha_ring_shift(ring, &cmd, &res, &count);

	res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1);
	cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) |
		   MTK_DESC_CT_LEN(ctx->ct_size);
	cmd->buf = cpu_to_le32(addr1);
	cmd->ct = cpu_to_le32(ctx->ct_dma);
	cmd->ct_hdr = ctx->ct_hdr;
	cmd->tfm = cpu_to_le32(ctx->tfm_dma);

	/* Optional second buffer gets a plain follow-up descriptor pair. */
	if (len2) {
		mtk_sha_ring_shift(ring, &cmd, &res, &count);

		res->hdr = MTK_DESC_BUF_LEN(len2);
		cmd->hdr = MTK_DESC_BUF_LEN(len2);
		cmd->buf = cpu_to_le32(addr2);
	}

	/* Mark whichever descriptor came last as the end of the chain. */
	cmd->hdr |= MTK_DESC_LAST;
	res->hdr |= MTK_DESC_LAST;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
	mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));

	return -EINPROGRESS;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) static int mtk_sha_dma_map(struct mtk_cryp *cryp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) struct mtk_sha_rec *sha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) struct mtk_sha_reqctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) SHA_BUF_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) dev_err(cryp->dev, "dma map error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) ctx->flags &= ~SHA_FLAGS_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) static int mtk_sha_update_slow(struct mtk_cryp *cryp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) struct mtk_sha_rec *sha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) size_t count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) u32 final;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) mtk_sha_append_sg(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) if (final) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) sha->flags |= SHA_FLAGS_FINAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) mtk_sha_fill_padding(ctx, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) count = ctx->bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) ctx->bufcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) return mtk_sha_dma_map(cryp, sha, ctx, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) static int mtk_sha_update_start(struct mtk_cryp *cryp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) struct mtk_sha_rec *sha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) u32 len, final, tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) if (!ctx->total)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) if (ctx->bufcnt || ctx->offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) return mtk_sha_update_slow(cryp, sha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) sg = ctx->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) if (!IS_ALIGNED(sg->offset, sizeof(u32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) return mtk_sha_update_slow(cryp, sha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->bs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) /* size is not ctx->bs aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) return mtk_sha_update_slow(cryp, sha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) len = min(ctx->total, sg->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) if (sg_is_last(sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) if (!(ctx->flags & SHA_FLAGS_FINUP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) /* not last sg must be ctx->bs aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) tail = len & (ctx->bs - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) len -= tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) ctx->total -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) ctx->offset = len; /* offset where to start slow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) /* Add padding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) if (final) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) size_t count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) tail = len & (ctx->bs - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) len -= tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) ctx->total += tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) ctx->offset = len; /* offset where to start slow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) sg = ctx->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) mtk_sha_append_sg(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) mtk_sha_fill_padding(ctx, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) SHA_BUF_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) dev_err(cryp->dev, "dma map bytes error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) sha->flags |= SHA_FLAGS_FINAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) count = ctx->bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) ctx->bufcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) if (len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) ctx->flags &= ~SHA_FLAGS_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) return mtk_sha_xmit(cryp, sha, ctx->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) count, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) ctx->sg = sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) dev_err(cryp->dev, "dma_map_sg error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) ctx->flags |= SHA_FLAGS_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) len, ctx->dma_addr, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) dev_err(cryp->dev, "dma_map_sg error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) ctx->flags |= SHA_FLAGS_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) len, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) static int mtk_sha_final_req(struct mtk_cryp *cryp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) struct mtk_sha_rec *sha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) size_t count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) mtk_sha_fill_padding(ctx, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) sha->flags |= SHA_FLAGS_FINAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) count = ctx->bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) ctx->bufcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) return mtk_sha_dma_map(cryp, sha, ctx, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) /* Copy ready hash (+ finalize hmac) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) static int mtk_sha_finish(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) __le32 *digest = ctx->info.digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) u32 *result = (u32 *)req->result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) /* Get the hash from the digest buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) result[i] = le32_to_cpu(digest[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (ctx->flags & SHA_FLAGS_HMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) return mtk_sha_finish_hmac(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) static void mtk_sha_finish_req(struct mtk_cryp *cryp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) struct mtk_sha_rec *sha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) if (likely(!err && (SHA_FLAGS_FINAL & sha->flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) err = mtk_sha_finish(sha->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) sha->req->base.complete(&sha->req->base, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) /* Handle new request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) tasklet_schedule(&sha->queue_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) struct mtk_sha_rec *sha = cryp->sha[id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) struct crypto_async_request *async_req, *backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) struct mtk_sha_reqctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) int err = 0, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) spin_lock_irqsave(&sha->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) if (req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) ret = ahash_enqueue_request(&sha->queue, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) if (SHA_FLAGS_BUSY & sha->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) spin_unlock_irqrestore(&sha->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) backlog = crypto_get_backlog(&sha->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) async_req = crypto_dequeue_request(&sha->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) if (async_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) sha->flags |= SHA_FLAGS_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) spin_unlock_irqrestore(&sha->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) if (!async_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) if (backlog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) backlog->complete(backlog, -EINPROGRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) req = ahash_request_cast(async_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) sha->req = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) mtk_sha_info_init(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) if (ctx->op == SHA_OP_UPDATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) err = mtk_sha_update_start(cryp, sha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) /* No final() after finup() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) err = mtk_sha_final_req(cryp, sha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) } else if (ctx->op == SHA_OP_FINAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) err = mtk_sha_final_req(cryp, sha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) if (unlikely(err != -EINPROGRESS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) /* Task will not finish it, so do it here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) mtk_sha_finish_req(cryp, sha, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) static int mtk_sha_enqueue(struct ahash_request *req, u32 op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) ctx->op = op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) return mtk_sha_handle_queue(tctx->cryp, tctx->id, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) if (ctx->flags & SHA_FLAGS_SG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (ctx->sg->length == ctx->offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) ctx->sg = sg_next(ctx->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) if (ctx->sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) ctx->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) if (ctx->flags & SHA_FLAGS_PAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) dma_unmap_single(cryp->dev, ctx->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) SHA_BUF_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) dma_unmap_single(cryp->dev, ctx->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) SHA_BUF_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) static void mtk_sha_complete(struct mtk_cryp *cryp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) struct mtk_sha_rec *sha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) err = mtk_sha_update_start(cryp, sha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) if (err != -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) mtk_sha_finish_req(cryp, sha, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) static int mtk_sha_update(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) ctx->total = req->nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) ctx->sg = req->src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) ctx->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) !(ctx->flags & SHA_FLAGS_FINUP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) return mtk_sha_append_sg(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) return mtk_sha_enqueue(req, SHA_OP_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) static int mtk_sha_final(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) ctx->flags |= SHA_FLAGS_FINUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) if (ctx->flags & SHA_FLAGS_PAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) return mtk_sha_finish(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) return mtk_sha_enqueue(req, SHA_OP_FINAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) static int mtk_sha_finup(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) int err1, err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) ctx->flags |= SHA_FLAGS_FINUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) err1 = mtk_sha_update(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (err1 == -EINPROGRESS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) (err1 == -EBUSY && (ahash_request_flags(req) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) CRYPTO_TFM_REQ_MAY_BACKLOG)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) return err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * final() has to be always called to cleanup resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * even if update() failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) err2 = mtk_sha_final(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return err1 ?: err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) static int mtk_sha_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) return mtk_sha_init(req) ?: mtk_sha_finup(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) u32 keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) struct mtk_sha_hmac_ctx *bctx = tctx->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) size_t bs = crypto_shash_blocksize(bctx->shash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) size_t ds = crypto_shash_digestsize(bctx->shash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (keylen > bs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) bctx->ipad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) keylen = ds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) memcpy(bctx->ipad, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) memset(bctx->ipad + keylen, 0, bs - keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) memcpy(bctx->opad, bctx->ipad, bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) for (i = 0; i < bs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) bctx->ipad[i] ^= HMAC_IPAD_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) bctx->opad[i] ^= HMAC_OPAD_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) static int mtk_sha_export(struct ahash_request *req, void *out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) const struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) memcpy(out, ctx, sizeof(*ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) static int mtk_sha_import(struct ahash_request *req, const void *in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) memcpy(ctx, in, sizeof(*ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) static int mtk_sha_cra_init_alg(struct crypto_tfm *tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) const char *alg_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) struct mtk_cryp *cryp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) cryp = mtk_sha_find_dev(tctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (!cryp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) sizeof(struct mtk_sha_reqctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (alg_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct mtk_sha_hmac_ctx *bctx = tctx->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) tctx->flags |= SHA_FLAGS_HMAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) bctx->shash = crypto_alloc_shash(alg_base, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) CRYPTO_ALG_NEED_FALLBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (IS_ERR(bctx->shash)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) pr_err("base driver %s could not be loaded.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) alg_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return PTR_ERR(bctx->shash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) static int mtk_sha_cra_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return mtk_sha_cra_init_alg(tfm, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) static int mtk_sha_cra_sha1_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return mtk_sha_cra_init_alg(tfm, "sha1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) static int mtk_sha_cra_sha224_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return mtk_sha_cra_init_alg(tfm, "sha224");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) static int mtk_sha_cra_sha256_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) return mtk_sha_cra_init_alg(tfm, "sha256");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) static int mtk_sha_cra_sha384_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return mtk_sha_cra_init_alg(tfm, "sha384");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static int mtk_sha_cra_sha512_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) return mtk_sha_cra_init_alg(tfm, "sha512");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) static void mtk_sha_cra_exit(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (tctx->flags & SHA_FLAGS_HMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct mtk_sha_hmac_ctx *bctx = tctx->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) crypto_free_shash(bctx->shash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static struct ahash_alg algs_sha1_sha224_sha256[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) .init = mtk_sha_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) .update = mtk_sha_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) .final = mtk_sha_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) .finup = mtk_sha_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) .digest = mtk_sha_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) .export = mtk_sha_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) .import = mtk_sha_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) .halg.digestsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) .halg.statesize = sizeof(struct mtk_sha_reqctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) .cra_name = "sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) .cra_driver_name = "mtk-sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) .cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) .cra_flags = CRYPTO_ALG_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) .cra_blocksize = SHA1_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) .cra_ctxsize = sizeof(struct mtk_sha_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) .cra_alignmask = SHA_ALIGN_MSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) .cra_init = mtk_sha_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) .cra_exit = mtk_sha_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) .init = mtk_sha_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) .update = mtk_sha_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) .final = mtk_sha_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) .finup = mtk_sha_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) .digest = mtk_sha_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) .export = mtk_sha_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) .import = mtk_sha_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) .halg.digestsize = SHA224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) .halg.statesize = sizeof(struct mtk_sha_reqctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) .cra_name = "sha224",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) .cra_driver_name = "mtk-sha224",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) .cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) .cra_flags = CRYPTO_ALG_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) .cra_blocksize = SHA224_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) .cra_ctxsize = sizeof(struct mtk_sha_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) .cra_alignmask = SHA_ALIGN_MSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) .cra_init = mtk_sha_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) .cra_exit = mtk_sha_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) .init = mtk_sha_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) .update = mtk_sha_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) .final = mtk_sha_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) .finup = mtk_sha_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) .digest = mtk_sha_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) .export = mtk_sha_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) .import = mtk_sha_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) .halg.digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) .halg.statesize = sizeof(struct mtk_sha_reqctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) .cra_name = "sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) .cra_driver_name = "mtk-sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) .cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) .cra_flags = CRYPTO_ALG_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) .cra_blocksize = SHA256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) .cra_ctxsize = sizeof(struct mtk_sha_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) .cra_alignmask = SHA_ALIGN_MSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) .cra_init = mtk_sha_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) .cra_exit = mtk_sha_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) .init = mtk_sha_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) .update = mtk_sha_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) .final = mtk_sha_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) .finup = mtk_sha_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) .digest = mtk_sha_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) .export = mtk_sha_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) .import = mtk_sha_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) .setkey = mtk_sha_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) .halg.digestsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) .halg.statesize = sizeof(struct mtk_sha_reqctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) .cra_name = "hmac(sha1)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) .cra_driver_name = "mtk-hmac-sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) .cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) CRYPTO_ALG_NEED_FALLBACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) .cra_blocksize = SHA1_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) .cra_ctxsize = sizeof(struct mtk_sha_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) sizeof(struct mtk_sha_hmac_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) .cra_alignmask = SHA_ALIGN_MSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) .cra_init = mtk_sha_cra_sha1_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) .cra_exit = mtk_sha_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) .init = mtk_sha_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) .update = mtk_sha_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) .final = mtk_sha_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) .finup = mtk_sha_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) .digest = mtk_sha_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) .export = mtk_sha_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) .import = mtk_sha_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) .setkey = mtk_sha_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) .halg.digestsize = SHA224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) .halg.statesize = sizeof(struct mtk_sha_reqctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) .cra_name = "hmac(sha224)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) .cra_driver_name = "mtk-hmac-sha224",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) .cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) CRYPTO_ALG_NEED_FALLBACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) .cra_blocksize = SHA224_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) .cra_ctxsize = sizeof(struct mtk_sha_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) sizeof(struct mtk_sha_hmac_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) .cra_alignmask = SHA_ALIGN_MSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) .cra_init = mtk_sha_cra_sha224_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) .cra_exit = mtk_sha_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) .init = mtk_sha_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) .update = mtk_sha_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) .final = mtk_sha_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) .finup = mtk_sha_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) .digest = mtk_sha_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) .export = mtk_sha_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) .import = mtk_sha_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) .setkey = mtk_sha_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) .halg.digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) .halg.statesize = sizeof(struct mtk_sha_reqctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) .cra_name = "hmac(sha256)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) .cra_driver_name = "mtk-hmac-sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) .cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) CRYPTO_ALG_NEED_FALLBACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) .cra_blocksize = SHA256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) .cra_ctxsize = sizeof(struct mtk_sha_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) sizeof(struct mtk_sha_hmac_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) .cra_alignmask = SHA_ALIGN_MSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) .cra_init = mtk_sha_cra_sha256_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) .cra_exit = mtk_sha_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static struct ahash_alg algs_sha384_sha512[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) .init = mtk_sha_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) .update = mtk_sha_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) .final = mtk_sha_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) .finup = mtk_sha_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) .digest = mtk_sha_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) .export = mtk_sha_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) .import = mtk_sha_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) .halg.digestsize = SHA384_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) .halg.statesize = sizeof(struct mtk_sha_reqctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) .cra_name = "sha384",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) .cra_driver_name = "mtk-sha384",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) .cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) .cra_flags = CRYPTO_ALG_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) .cra_blocksize = SHA384_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) .cra_ctxsize = sizeof(struct mtk_sha_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) .cra_alignmask = SHA_ALIGN_MSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) .cra_init = mtk_sha_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) .cra_exit = mtk_sha_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) .init = mtk_sha_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) .update = mtk_sha_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) .final = mtk_sha_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) .finup = mtk_sha_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) .digest = mtk_sha_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) .export = mtk_sha_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) .import = mtk_sha_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) .halg.digestsize = SHA512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) .halg.statesize = sizeof(struct mtk_sha_reqctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) .cra_name = "sha512",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) .cra_driver_name = "mtk-sha512",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) .cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) .cra_flags = CRYPTO_ALG_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) .cra_blocksize = SHA512_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) .cra_ctxsize = sizeof(struct mtk_sha_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) .cra_alignmask = SHA_ALIGN_MSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) .cra_init = mtk_sha_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) .cra_exit = mtk_sha_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) .init = mtk_sha_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) .update = mtk_sha_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) .final = mtk_sha_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) .finup = mtk_sha_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) .digest = mtk_sha_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) .export = mtk_sha_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) .import = mtk_sha_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) .setkey = mtk_sha_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) .halg.digestsize = SHA384_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) .halg.statesize = sizeof(struct mtk_sha_reqctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) .cra_name = "hmac(sha384)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) .cra_driver_name = "mtk-hmac-sha384",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) .cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) CRYPTO_ALG_NEED_FALLBACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) .cra_blocksize = SHA384_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) .cra_ctxsize = sizeof(struct mtk_sha_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) sizeof(struct mtk_sha_hmac_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) .cra_alignmask = SHA_ALIGN_MSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) .cra_init = mtk_sha_cra_sha384_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) .cra_exit = mtk_sha_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) .init = mtk_sha_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) .update = mtk_sha_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) .final = mtk_sha_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) .finup = mtk_sha_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) .digest = mtk_sha_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) .export = mtk_sha_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) .import = mtk_sha_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) .setkey = mtk_sha_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) .halg.digestsize = SHA512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) .halg.statesize = sizeof(struct mtk_sha_reqctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) .cra_name = "hmac(sha512)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) .cra_driver_name = "mtk-hmac-sha512",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) .cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) CRYPTO_ALG_NEED_FALLBACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) .cra_blocksize = SHA512_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) .cra_ctxsize = sizeof(struct mtk_sha_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) sizeof(struct mtk_sha_hmac_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) .cra_alignmask = SHA_ALIGN_MSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) .cra_init = mtk_sha_cra_sha512_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) .cra_exit = mtk_sha_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static void mtk_sha_queue_task(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static void mtk_sha_done_task(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) struct mtk_cryp *cryp = sha->cryp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) mtk_sha_unmap(cryp, sha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) mtk_sha_complete(cryp, sha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) static irqreturn_t mtk_sha_irq(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) struct mtk_cryp *cryp = sha->cryp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) mtk_sha_write(cryp, RDR_STAT(sha->id), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (likely((SHA_FLAGS_BUSY & sha->flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) mtk_sha_write(cryp, RDR_THRESH(sha->id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) tasklet_schedule(&sha->done_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * The purpose of two SHA records is used to get extra performance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * It is similar to mtk_aes_record_init().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static int mtk_sha_record_init(struct mtk_cryp *cryp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct mtk_sha_rec **sha = cryp->sha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) int i, err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) for (i = 0; i < MTK_REC_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (!sha[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) goto err_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) sha[i]->cryp = cryp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) spin_lock_init(&sha[i]->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) (unsigned long)sha[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) tasklet_init(&sha[i]->done_task, mtk_sha_done_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) (unsigned long)sha[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) /* Link to ring2 and ring3 respectively */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) sha[0]->id = MTK_RING2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) sha[1]->id = MTK_RING3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) cryp->rec = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) err_cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) for (; i--; )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) kfree(sha[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) static void mtk_sha_record_free(struct mtk_cryp *cryp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) for (i = 0; i < MTK_REC_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) tasklet_kill(&cryp->sha[i]->done_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) tasklet_kill(&cryp->sha[i]->queue_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) kfree(cryp->sha[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) static void mtk_sha_unregister_algs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) crypto_unregister_ahash(&algs_sha384_sha512[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) static int mtk_sha_register_algs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) err = crypto_register_ahash(&algs_sha1_sha224_sha256[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) goto err_sha_224_256_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) err = crypto_register_ahash(&algs_sha384_sha512[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) goto err_sha_384_512_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) err_sha_384_512_algs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) for (; i--; )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) crypto_unregister_ahash(&algs_sha384_sha512[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) i = ARRAY_SIZE(algs_sha1_sha224_sha256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) err_sha_224_256_algs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) for (; i--; )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) int mtk_hash_alg_register(struct mtk_cryp *cryp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) INIT_LIST_HEAD(&cryp->sha_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) /* Initialize two hash records */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) err = mtk_sha_record_init(cryp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) goto err_record;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 0, "mtk-sha", cryp->sha[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) dev_err(cryp->dev, "unable to request sha irq0.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) goto err_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 0, "mtk-sha", cryp->sha[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) dev_err(cryp->dev, "unable to request sha irq1.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) goto err_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) /* Enable ring2 and ring3 interrupt for hash */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) spin_lock(&mtk_sha.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) list_add_tail(&cryp->sha_list, &mtk_sha.dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) spin_unlock(&mtk_sha.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) err = mtk_sha_register_algs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) goto err_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) err_algs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) spin_lock(&mtk_sha.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) list_del(&cryp->sha_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) spin_unlock(&mtk_sha.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) err_res:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) mtk_sha_record_free(cryp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) err_record:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) dev_err(cryp->dev, "mtk-sha initialization failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) void mtk_hash_alg_release(struct mtk_cryp *cryp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) spin_lock(&mtk_sha.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) list_del(&cryp->sha_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) spin_unlock(&mtk_sha.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) mtk_sha_unregister_algs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) mtk_sha_record_free(cryp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }