// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/sm3.h>
#include <crypto/internal/hash.h>

#include "cc_driver.h"
#include "cc_request_mgr.h"
#include "cc_buffer_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define CC_MAX_HASH_SEQ_LEN 12
#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
#define CC_SM3_HASH_LEN_SIZE 8

struct cc_hash_handle {
	u32 digest_len_sram_addr; /* const value in SRAM */
	u32 larval_digest_sram_addr; /* const value in SRAM */
	struct list_head hash_list;
};

static const u32 cc_digest_len_init[] = {
	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
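/*
 * The MD5 IV words (0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476) are
 * identical to SHA1_H0..SHA1_H3, so the SHA-1 constants are reused here.
 * Like the other larval digests below, the words are stored in reverse
 * order.
 */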
static const u32 cc_md5_init[] = {
	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 cc_sha1_init[] = {
	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 cc_sha224_init[] = {
	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const u32 cc_sha256_init[] = {
	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
static const u32 cc_digest_len_sha512_init[] = {
	0x00000080, 0x00000000, 0x00000000, 0x00000000 };

/*
 * Due to the way the HW works, every double word in the SHA384 and SHA512
 * larval hashes must be stored in hi/lo order
 */
#define hilo(x)	upper_32_bits(x), lower_32_bits(x)
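/*
 * For example: SHA512_H0 is 0x6a09e667f3bcc908ULL, so hilo(SHA512_H0)
 * expands to the two u32 initializers 0x6a09e667, 0xf3bcc908.
 */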
static const u32 cc_sha384_init[] = {
	hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
	hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
static const u32 cc_sha512_init[] = {
	hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
	hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };

static const u32 cc_sm3_init[] = {
	SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
	SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };

static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static const void *cc_larval_digest(struct device *dev, u32 mode);

struct cc_hash_alg {
	struct list_head entry;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct cc_drvdata *drvdata;
	struct ahash_alg ahash_alg;
};

struct hash_key_req_ctx {
	u32 keylen;
	dma_addr_t key_dma_addr;
	u8 *key;
};
/* hash per-session context */
struct cc_hash_ctx {
	struct cc_drvdata *drvdata;
	/* holds the original digest: the digest after "setkey" for HMAC,
	 * the initial digest for plain HASH.
	 */
	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;

	dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
	dma_addr_t digest_buff_dma_addr;
	/* used for HMAC with a key larger than the mode's block size */
	struct hash_key_req_ctx key_params;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	unsigned int hash_len;
	struct completion setkey_comp;
	bool is_hmac;
};

static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
			unsigned int flow_mode, struct cc_hw_desc desc[],
			bool is_not_last_data, unsigned int *seq_size);

static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
{
	if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
	    mode == DRV_HASH_SHA512) {
		set_bytes_swap(desc, 1);
	} else {
		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	}
}
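
/*
 * Note: MD5 and the SHA-384/SHA-512 modes get their result bytes swapped,
 * while the remaining modes use the "little endian result" cipher config.
 * This split matches how the engine stores its internal state per mode (a
 * driver convention, inferred from the uses of cc_set_endianity() below).
 */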

static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
			 unsigned int digestsize)
{
	state->digest_result_dma_addr =
		dma_map_single(dev, state->digest_result_buff,
			       digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
			digestsize);
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
		digestsize, state->digest_result_buff,
		&state->digest_result_dma_addr);

	return 0;
}
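
/*
 * Typical request lifecycle (as exercised by cc_hash_digest() below):
 * cc_init_req() seeds the request state, cc_map_req()/cc_map_result()
 * DMA-map the state and result buffers, cc_map_hash_request_final() maps
 * the source scatterlist, and cc_send_request() queues the descriptors;
 * the completion callback then unmaps everything in reverse.
 */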

static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
			struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	memset(state, 0, sizeof(*state));

	if (is_hmac) {
		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
		    ctx->hw_mode != DRV_CIPHER_CMAC) {
			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);

			memcpy(state->digest_buff, ctx->digest_buff,
			       ctx->inter_digestsize);
			if (ctx->hash_mode == DRV_HASH_SHA512 ||
			    ctx->hash_mode == DRV_HASH_SHA384)
				memcpy(state->digest_bytes_len,
				       cc_digest_len_sha512_init,
				       ctx->hash_len);
			else
				memcpy(state->digest_bytes_len,
				       cc_digest_len_init,
				       ctx->hash_len);
		}

		if (ctx->hash_mode != DRV_HASH_NULL) {
			dma_sync_single_for_cpu(dev,
						ctx->opad_tmp_keys_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);
			memcpy(state->opad_digest_buff,
			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
		}
	} else { /* hash */
		/* Copy the initial digests if hash flow. */
		const void *larval = cc_larval_digest(dev, ctx->hash_mode);

		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
	}
}

static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
		      struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	state->digest_buff_dma_addr =
		dma_map_single(dev, state->digest_buff,
			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest buffer %d B at va=%pK for DMA failed\n",
			ctx->inter_digestsize, state->digest_buff);
		return -EINVAL;
	}
	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
		ctx->inter_digestsize, state->digest_buff,
		&state->digest_buff_dma_addr);

	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
		state->digest_bytes_len_dma_addr =
			dma_map_single(dev, state->digest_bytes_len,
				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
			goto unmap_digest_buf;
		}
		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
			&state->digest_bytes_len_dma_addr);
	}

	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
		state->opad_digest_dma_addr =
			dma_map_single(dev, state->opad_digest_buff,
				       ctx->inter_digestsize,
				       DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
				ctx->inter_digestsize,
				state->opad_digest_buff);
			goto unmap_digest_len;
		}
		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
			ctx->inter_digestsize, state->opad_digest_buff,
			&state->opad_digest_dma_addr);
	}

	return 0;

unmap_digest_len:
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		state->digest_bytes_len_dma_addr = 0;
	}
unmap_digest_buf:
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		state->digest_buff_dma_addr = 0;
	}

	return -EINVAL;
}

static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
			 struct cc_hash_ctx *ctx)
{
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&state->digest_buff_dma_addr);
		state->digest_buff_dma_addr = 0;
	}
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
			&state->digest_bytes_len_dma_addr);
		state->digest_bytes_len_dma_addr = 0;
	}
	if (state->opad_digest_dma_addr) {
		dma_unmap_single(dev, state->opad_digest_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
			&state->opad_digest_dma_addr);
		state->opad_digest_dma_addr = 0;
	}
}

static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
			    unsigned int digestsize, u8 *result)
{
	if (state->digest_result_dma_addr) {
		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
			state->digest_result_buff,
			&state->digest_result_dma_addr, digestsize);
		memcpy(result, state->digest_result_buff, digestsize);
	}
	state->digest_result_dma_addr = 0;
}
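
/*
 * Note that, beyond unmapping, cc_unmap_result() also copies the digest the
 * engine wrote into the bounce buffer back to the caller's result buffer.
 */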

static void cc_update_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static void cc_digest_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static void cc_hash_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
			 int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	idx++;

	return idx;
}

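/*
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)). When cc_fin_hmac() runs,
 * the inner hash H((K ^ ipad) || m) is complete, so the descriptors below
 * save that digest and run the outer hash over the precomputed opad state
 * followed by the saved inner digest.
 */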
static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
		       int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* store the hash digest result in the context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
		      NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Loading hash opad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_sram(&desc[idx],
		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
		     ctx->hash_len);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* Perform HASH update */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     digestsize, NS_BIT);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	return idx;
}

static int cc_hash_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 larval_digest_addr;
	int idx = 0;
	int rc = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_digest_complete;
	cc_req.user_arg = req;

	/* If HMAC then load hash IPAD xor key, if HASH then load initial
	 * digest
	 */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
	} else {
		larval_digest_addr = cc_larval_digest_addr(ctx->drvdata,
							   ctx->hash_mode);
		set_din_sram(&desc[idx], larval_digest_addr,
			     ctx->inter_digestsize);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);

	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI,
			     state->digest_bytes_len_dma_addr,
			     ctx->hash_len, NS_BIT);
	} else {
		set_din_const(&desc[idx], 0, ctx->hash_len);
		if (nbytes)
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		else
			set_cipher_do(&desc[idx], DO_PAD);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	if (is_hmac) {
		/* HW last hash block padding (aka. "DO_PAD") */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      ctx->hash_len, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
		set_cipher_do(&desc[idx], DO_PAD);
		idx++;

		idx = cc_fin_hmac(desc, req, idx);
	}

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}
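
/*
 * To summarize, the digest flow above queues: a state-load descriptor (the
 * IPAD digest for HMAC, the larval digest otherwise), a length-load
 * descriptor, the data descriptors from cc_set_desc(), the HMAC
 * finalization sequence when applicable, and the result write-out from
 * cc_fin_result().
 */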

static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
			   struct ahash_req_ctx *state, unsigned int idx)
{
	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     ctx->hash_len, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	return idx;
}
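
/*
 * cc_restore_hash() is the mirror image of the state store done at the end
 * of cc_hash_update(): it reloads the intermediate digest and byte count a
 * previous update wrote back, then streams in the new data.
 */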

static int cc_hash_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
		"hmac" : "hash", nbytes);

	if (nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
					block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, "data size %x does not require HW update\n",
				nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		return -EINVAL;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_update_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* store current hash length in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}
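
/*
 * The two write-back descriptors at the end of cc_hash_update() persist the
 * intermediate digest and running byte count into the request state, which
 * is what lets a later update/finup/final call resume via cc_restore_hash().
 */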

static int cc_do_finup(struct ahash_request *req, bool update)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
		update ? "finup" : "final", nbytes);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* Pad the hash */
	hw_desc_init(&desc[idx]);
	set_cipher_do(&desc[idx], DO_PAD);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	idx++;

	if (is_hmac)
		idx = cc_fin_hmac(desc, req, idx);

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_hash_finup(struct ahash_request *req)
{
	return cc_do_finup(req, true);
}

static int cc_hash_final(struct ahash_request *req)
{
	return cc_do_finup(req, false);
}
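
/*
 * finup hashes any still-buffered data together with req->src before
 * finalizing, while final only flushes what is already buffered; both are
 * thin wrappers that differ only in the "update" flag passed to
 * cc_do_finup().
 */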

static int cc_hash_init(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	return 0;
}
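
/*
 * Illustrative only (not part of this driver): a kernel caller would
 * exercise the handlers above through the generic ahash API, roughly as
 * sketched below. Error handling is omitted, "sg" and "nbytes" stand in for
 * the caller's data, and "sha256" resolves to this driver only when the
 * CryptoCell instance outranks other sha256 providers:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	u8 out[SHA256_DIGEST_SIZE];
 *	int err;
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, sg, out, nbytes);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */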

static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
			  unsigned int keylen)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = NULL;
	int blocksize = 0;
	int digestsize = 0;
	int i, idx = 0, rc = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 larval_addr;
	struct device *dev;

	ctx = crypto_ahash_ctx(ahash);
	dev = drvdata_to_dev(ctx->drvdata);
	dev_dbg(dev, "start keylen: %d", keylen);

	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	digestsize = crypto_ahash_digestsize(ahash);

	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);

	/* A zero keylen selects the plain HASH flow; any non-zero keylen
	 * selects the HMAC flow.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) ctx->key_params.keylen = keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) ctx->key_params.key_dma_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) ctx->is_hmac = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) ctx->key_params.key = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) if (keylen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (!ctx->key_params.key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) ctx->key_params.key_dma_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) dma_map_single(dev, ctx->key_params.key, keylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) ctx->key_params.key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) kfree_sensitive(ctx->key_params.key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (keylen > blocksize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /* Load hash initial state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) set_cipher_mode(&desc[idx], ctx->hw_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) set_din_sram(&desc[idx], larval_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) ctx->inter_digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) set_flow_mode(&desc[idx], S_DIN_to_HASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) /* Load the hash current length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) set_cipher_mode(&desc[idx], ctx->hw_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) set_din_const(&desc[idx], 0, ctx->hash_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) set_flow_mode(&desc[idx], S_DIN_to_HASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) set_din_type(&desc[idx], DMA_DLLI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) ctx->key_params.key_dma_addr, keylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) NS_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) set_flow_mode(&desc[idx], DIN_HASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /* Get hashed key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) set_cipher_mode(&desc[idx], ctx->hw_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) digestsize, NS_BIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) set_flow_mode(&desc[idx], S_HASH_to_DOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) cc_set_endianity(ctx->hash_mode, &desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) set_din_const(&desc[idx], 0, (blocksize - digestsize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) set_flow_mode(&desc[idx], BYPASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) set_dout_dlli(&desc[idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) (ctx->opad_tmp_keys_dma_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) digestsize),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) (blocksize - digestsize), NS_BIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) set_din_type(&desc[idx], DMA_DLLI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) ctx->key_params.key_dma_addr, keylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) NS_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) set_flow_mode(&desc[idx], BYPASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) keylen, NS_BIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (blocksize > keylen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) set_din_const(&desc[idx], 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) (blocksize - keylen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) set_flow_mode(&desc[idx], BYPASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) set_dout_dlli(&desc[idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) (ctx->opad_tmp_keys_dma_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) keylen), (blocksize - keylen),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) NS_BIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) set_din_const(&desc[idx], 0, blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) set_flow_mode(&desc[idx], BYPASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) blocksize, NS_BIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) dev_err(dev, "send_request() failed (rc=%d)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /* Calculate the two derived HMAC keys: hash (key XOR ipad) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)  * (key XOR opad), per RFC 2104
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) for (idx = 0, i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) /* Load hash initial state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) set_cipher_mode(&desc[idx], ctx->hw_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) set_flow_mode(&desc[idx], S_DIN_to_HASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /* Load the hash current length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) set_cipher_mode(&desc[idx], ctx->hw_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) set_din_const(&desc[idx], 0, ctx->hash_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) set_flow_mode(&desc[idx], S_DIN_to_HASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* Prepare ipad key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) set_xor_val(&desc[idx], hmac_pad_const[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) set_cipher_mode(&desc[idx], ctx->hw_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) set_flow_mode(&desc[idx], S_DIN_to_HASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /* Perform HASH update */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) blocksize, NS_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) set_cipher_mode(&desc[idx], ctx->hw_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) set_xor_active(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) set_flow_mode(&desc[idx], DIN_HASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /* Write out the IPAD/OPAD intermediate digest (the IPAD digest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)  * becomes the initial state of the first HASH "update" operation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) set_cipher_mode(&desc[idx], ctx->hw_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (i > 0) /* Not first iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) ctx->inter_digestsize, NS_BIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) else /* First iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) ctx->inter_digestsize, NS_BIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) set_flow_mode(&desc[idx], S_HASH_to_DOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
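^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /* On loop exit, digest_buff holds the ipad intermediate digest and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)  * opad_tmp_keys holds the opad one, for later use by the HMAC flows.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)  */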
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (ctx->key_params.key_dma_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) dma_unmap_single(dev, ctx->key_params.key_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) ctx->key_params.keylen, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) kfree_sensitive(ctx->key_params.key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
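^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)  * Usage sketch (illustrative only, not part of this driver): the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)  * setkey above is reached through the generic ahash API. A client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)  * would do roughly the following; the algorithm name "hmac(sha256)"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)  * and the key/sg/digest buffers are assumptions of the sketch, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)  * error handling is omitted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)  *	DECLARE_CRYPTO_WAIT(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)  *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)  *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)  *	crypto_ahash_setkey(tfm, key, keylen);	<- ends up in cc_hash_setkey()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)  *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)  *				   crypto_req_done, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)  *	ahash_request_set_crypt(req, sg, digest, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)  *	crypto_wait_req(crypto_ahash_digest(req), &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)  *	ahash_request_free(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)  *	crypto_free_ahash(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)  */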
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) static int cc_xcbc_setkey(struct crypto_ahash *ahash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) const u8 *key, unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) struct cc_crypto_req cc_req = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) struct device *dev = drvdata_to_dev(ctx->drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) unsigned int idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) switch (keylen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) case AES_KEYSIZE_128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) case AES_KEYSIZE_192:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) case AES_KEYSIZE_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) ctx->key_params.keylen = keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (!ctx->key_params.key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) ctx->key_params.key_dma_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) kfree_sensitive(ctx->key_params.key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) ctx->is_hmac = true;
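^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)  * Derive the three XCBC subkeys, as in the AES-XCBC-MAC construction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)  * (RFC 3566): K1 = AES-K(0x01..01), K2 = AES-K(0x02..02) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)  * K3 = AES-K(0x03..03). The descriptors below encrypt the three
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)  * constant blocks under the user key (loaded once in ECB mode) into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)  * the opad_tmp_keys buffer at their respective offsets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)  */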
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /* 1. Load the AES key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) keylen, NS_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) set_key_size_aes(&desc[idx], keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) set_flow_mode(&desc[idx], S_DIN_to_AES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) set_flow_mode(&desc[idx], DIN_AES_DOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) set_dout_dlli(&desc[idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) set_flow_mode(&desc[idx], DIN_AES_DOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) set_dout_dlli(&desc[idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) set_flow_mode(&desc[idx], DIN_AES_DOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) set_dout_dlli(&desc[idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) dma_unmap_single(dev, ctx->key_params.key_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) ctx->key_params.keylen, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) kfree_sensitive(ctx->key_params.key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) static int cc_cmac_setkey(struct crypto_ahash *ahash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) const u8 *key, unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct device *dev = drvdata_to_dev(ctx->drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) ctx->is_hmac = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) switch (keylen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) case AES_KEYSIZE_128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) case AES_KEYSIZE_192:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) case AES_KEYSIZE_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) ctx->key_params.keylen = keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) /* STAT_PHASE_1: Copy key to ctx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) keylen, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) memcpy(ctx->opad_tmp_keys_buff, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (keylen == AES_KEYSIZE_192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) memset(ctx->opad_tmp_keys_buff + AES_KEYSIZE_192, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) CC_AES_KEY_SIZE_MAX - AES_KEYSIZE_192);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
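^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)  * A 192-bit key is zero-extended because the MAC descriptors load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)  * AES_MAX_KEY_SIZE bytes of key material for 24-byte keys (see the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)  * key_size selection in cc_mac_final()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)  */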
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) keylen, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
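^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /* Unlike cc_xcbc_setkey(), no subkeys are derived in software here;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)  * only the raw key is staged in opad_tmp_keys_buff for the engine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)  */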
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) static void cc_free_ctx(struct cc_hash_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct device *dev = drvdata_to_dev(ctx->drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (ctx->digest_buff_dma_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) dma_unmap_single(dev, ctx->digest_buff_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) &ctx->digest_buff_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) ctx->digest_buff_dma_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (ctx->opad_tmp_keys_dma_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) sizeof(ctx->opad_tmp_keys_buff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) &ctx->opad_tmp_keys_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) ctx->opad_tmp_keys_dma_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) ctx->key_params.keylen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct device *dev = drvdata_to_dev(ctx->drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) ctx->key_params.keylen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) ctx->digest_buff_dma_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) sizeof(ctx->digest_buff), ctx->digest_buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) sizeof(ctx->digest_buff), ctx->digest_buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) &ctx->digest_buff_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) ctx->opad_tmp_keys_dma_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) dma_map_single(dev, ctx->opad_tmp_keys_buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) sizeof(ctx->opad_tmp_keys_buff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) sizeof(ctx->opad_tmp_keys_buff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) ctx->opad_tmp_keys_buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) &ctx->opad_tmp_keys_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) ctx->is_hmac = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) cc_free_ctx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
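^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)  * hash_len is the byte width of the running message-length counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)  * kept alongside the intermediate digest: always 8 bytes for SM3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)  * otherwise the HW-revision default from cc_get_default_hash_len().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)  */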
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) static int cc_get_hash_len(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (ctx->hash_mode == DRV_HASH_SM3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return CC_SM3_HASH_LEN_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) return cc_get_default_hash_len(ctx->drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) static int cc_cra_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct hash_alg_common *hash_alg_common =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) container_of(tfm->__crt_alg, struct hash_alg_common, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) struct ahash_alg *ahash_alg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) container_of(hash_alg_common, struct ahash_alg, halg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct cc_hash_alg *cc_alg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) sizeof(struct ahash_req_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) ctx->hash_mode = cc_alg->hash_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) ctx->hw_mode = cc_alg->hw_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) ctx->inter_digestsize = cc_alg->inter_digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) ctx->drvdata = cc_alg->drvdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) ctx->hash_len = cc_get_hash_len(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) return cc_alloc_ctx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) static void cc_cra_exit(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct device *dev = drvdata_to_dev(ctx->drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) dev_dbg(dev, "cc_cra_exit\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) cc_free_ctx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static int cc_mac_update(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct ahash_req_ctx *state = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct device *dev = drvdata_to_dev(ctx->drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) struct cc_crypto_req cc_req = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) u32 idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) gfp_t flags = cc_gfp_flags(&req->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (req->nbytes == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /* no real updates required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
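^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /* Count HW-visible updates; the final/finup flows use this to tell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)  * an empty message from one that ended on a block boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)  */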
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) state->xcbc_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) req->nbytes, block_size, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (rc == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) dev_dbg(dev, "data size does not require HW update %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) /* No hardware updates are required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) dev_err(dev, "map_ahash_request_update() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (cc_map_req(dev, state, ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) dev_err(dev, "map_ahash_source() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) cc_setup_xcbc(req, desc, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) cc_setup_cmac(req, desc, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) /* store the hash digest result in context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) set_cipher_mode(&desc[idx], ctx->hw_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) ctx->inter_digestsize, NS_BIT, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) set_queue_last_ind(ctx->drvdata, &desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) set_flow_mode(&desc[idx], S_AES_to_DOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) /* Setup request structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) cc_req.user_cb = cc_update_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) cc_req.user_arg = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (rc != -EINPROGRESS && rc != -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) dev_err(dev, "send_request() failed (rc=%d)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) cc_unmap_hash_request(dev, state, req->src, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) cc_unmap_req(dev, state, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) static int cc_mac_final(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) struct ahash_req_ctx *state = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct device *dev = drvdata_to_dev(ctx->drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) struct cc_crypto_req cc_req = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) int idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) u32 key_size, key_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) u32 digestsize = crypto_ahash_digestsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) gfp_t flags = cc_gfp_flags(&req->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) u32 rem_cnt = *cc_hash_buf_cnt(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) key_size = CC_AES_128_BIT_KEY_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) key_len = CC_AES_128_BIT_KEY_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) key_size = (ctx->key_params.keylen == AES_KEYSIZE_192) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) AES_MAX_KEY_SIZE : ctx->key_params.keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) key_len = ctx->key_params.keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (cc_map_req(dev, state, ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) dev_err(dev, "map_ahash_source() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) req->nbytes, 0, flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) dev_err(dev, "map_ahash_request_final() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) cc_unmap_req(dev, state, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (cc_map_result(dev, state, digestsize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) dev_err(dev, "map_ahash_digest() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) cc_unmap_hash_request(dev, state, req->src, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) cc_unmap_req(dev, state, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* Setup request structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) cc_req.user_cb = cc_hash_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) cc_req.user_arg = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (state->xcbc_count && rem_cnt == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) /* Load key for ECB decryption */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) set_din_type(&desc[idx], DMA_DLLI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) key_size, NS_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) set_key_size_aes(&desc[idx], key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) set_flow_mode(&desc[idx], S_DIN_to_AES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) /* Initiate decryption of the block state back to the previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)  * block_state XOR M[n]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) CC_AES_BLOCK_SIZE, NS_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) CC_AES_BLOCK_SIZE, NS_BIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) set_flow_mode(&desc[idx], DIN_AES_DOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /* Memory Barrier: wait for axi write to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) set_din_no_dma(&desc[idx], 0, 0xfffff0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) set_dout_no_dma(&desc[idx], 0, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) cc_setup_xcbc(req, desc, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) cc_setup_cmac(req, desc, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
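^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) /* Three cases: an empty message (CMAC size-0 mode), a buffered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)  * partial block to process, or a block-aligned message, for which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)  * one zero block is fed through using the state recovered above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)  */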
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) if (state->xcbc_count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) set_cipher_mode(&desc[idx], ctx->hw_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) set_key_size_aes(&desc[idx], key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) set_cmac_size0_mode(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) set_flow_mode(&desc[idx], S_DIN_to_AES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) } else if (rem_cnt > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) set_flow_mode(&desc[idx], DIN_AES_DOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) /* Get final MAC result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) digestsize, NS_BIT, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) set_queue_last_ind(ctx->drvdata, &desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) set_flow_mode(&desc[idx], S_AES_to_DOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) set_cipher_mode(&desc[idx], ctx->hw_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (rc != -EINPROGRESS && rc != -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) dev_err(dev, "send_request() failed (rc=%d)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) cc_unmap_hash_request(dev, state, req->src, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) cc_unmap_result(dev, state, digestsize, req->result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) cc_unmap_req(dev, state, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) static int cc_mac_finup(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) struct ahash_req_ctx *state = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) struct device *dev = drvdata_to_dev(ctx->drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) struct cc_crypto_req cc_req = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) int idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) u32 key_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) u32 digestsize = crypto_ahash_digestsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) gfp_t flags = cc_gfp_flags(&req->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) dev_dbg(dev, "===== finup xcbc (%d) ====\n", req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (state->xcbc_count > 0 && req->nbytes == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) dev_dbg(dev, "No data to update. Falling back to cc_mac_final\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) return cc_mac_final(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (cc_map_req(dev, state, ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) dev_err(dev, "map_ahash_source() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) req->nbytes, 1, flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) dev_err(dev, "map_ahash_request_final() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) cc_unmap_req(dev, state, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (cc_map_result(dev, state, digestsize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) dev_err(dev, "map_ahash_digest() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) cc_unmap_hash_request(dev, state, req->src, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) cc_unmap_req(dev, state, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) /* Setup request structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) cc_req.user_cb = cc_hash_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) cc_req.user_arg = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) key_len = CC_AES_128_BIT_KEY_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) cc_setup_xcbc(req, desc, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) key_len = ctx->key_params.keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) cc_setup_cmac(req, desc, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (req->nbytes == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) set_cipher_mode(&desc[idx], ctx->hw_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) set_key_size_aes(&desc[idx], key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) set_cmac_size0_mode(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) set_flow_mode(&desc[idx], S_DIN_to_AES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) /* Get final MAC result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) digestsize, NS_BIT, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) set_queue_last_ind(ctx->drvdata, &desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) set_flow_mode(&desc[idx], S_AES_to_DOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) set_cipher_mode(&desc[idx], ctx->hw_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (rc != -EINPROGRESS && rc != -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) dev_err(dev, "send_request() failed (rc=%d)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) cc_unmap_hash_request(dev, state, req->src, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) cc_unmap_result(dev, state, digestsize, req->result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) cc_unmap_req(dev, state, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static int cc_mac_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) struct ahash_req_ctx *state = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) struct device *dev = drvdata_to_dev(ctx->drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) u32 digestsize = crypto_ahash_digestsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) struct cc_crypto_req cc_req = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) u32 key_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) unsigned int idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) gfp_t flags = cc_gfp_flags(&req->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) dev_dbg(dev, "===== digest mac (%d) ====\n", req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) cc_init_req(dev, state, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (cc_map_req(dev, state, ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) dev_err(dev, "map_ahash_source() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) if (cc_map_result(dev, state, digestsize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) dev_err(dev, "map_ahash_digest() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) cc_unmap_req(dev, state, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) req->nbytes, 1, flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) dev_err(dev, "map_ahash_request_final() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) cc_unmap_req(dev, state, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) /* Setup request structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) cc_req.user_cb = cc_digest_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) cc_req.user_arg = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) key_len = CC_AES_128_BIT_KEY_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) cc_setup_xcbc(req, desc, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) key_len = ctx->key_params.keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) cc_setup_cmac(req, desc, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (req->nbytes == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) set_cipher_mode(&desc[idx], ctx->hw_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) set_key_size_aes(&desc[idx], key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) set_cmac_size0_mode(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) set_flow_mode(&desc[idx], S_DIN_to_AES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) /* Get final MAC result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) CC_AES_BLOCK_SIZE, NS_BIT, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) set_queue_last_ind(ctx->drvdata, &desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) set_flow_mode(&desc[idx], S_AES_to_DOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) set_cipher_mode(&desc[idx], ctx->hw_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (rc != -EINPROGRESS && rc != -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) dev_err(dev, "send_request() failed (rc=%d)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) cc_unmap_hash_request(dev, state, req->src, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) cc_unmap_result(dev, state, digestsize, req->result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) cc_unmap_req(dev, state, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) static int cc_hash_export(struct ahash_request *req, void *out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) struct ahash_req_ctx *state = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) u8 *curr_buff = cc_hash_buf(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) const u32 tmp = CC_EXPORT_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
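^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) /* Serialized layout, mirrored by cc_hash_import(): u32 magic |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)  * intermediate digest | running length | u32 buffered-byte count |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)  * buffered bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)  */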
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) memcpy(out, &tmp, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) out += sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) memcpy(out, state->digest_buff, ctx->inter_digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) out += ctx->inter_digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) memcpy(out, state->digest_bytes_len, ctx->hash_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) out += ctx->hash_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) memcpy(out, &curr_buff_cnt, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) out += sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) memcpy(out, curr_buff, curr_buff_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
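/*
 * cc_hash_import() - Restore a hash state previously serialized by
 * cc_hash_export(). The magic word is validated up front and the
 * buffered-byte count is sanity-checked before the buffered data is
 * copied back into the reinitialized request context.
 */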
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) static int cc_hash_import(struct ahash_request *req, const void *in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) struct device *dev = drvdata_to_dev(ctx->drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct ahash_req_ctx *state = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) memcpy(&tmp, in, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (tmp != CC_EXPORT_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) in += sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) cc_init_req(dev, state, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) memcpy(state->digest_buff, in, ctx->inter_digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) in += ctx->inter_digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) memcpy(state->digest_bytes_len, in, ctx->hash_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) in += ctx->hash_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) /* Sanity check the data as much as possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) memcpy(&tmp, in, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (tmp > CC_MAX_HASH_BLCK_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) in += sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) state->buf_cnt[0] = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) memcpy(state->buffers[0], in, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) struct cc_hash_template {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) char name[CRYPTO_MAX_ALG_NAME];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) char driver_name[CRYPTO_MAX_ALG_NAME];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) char mac_name[CRYPTO_MAX_ALG_NAME];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) char mac_driver_name[CRYPTO_MAX_ALG_NAME];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) unsigned int blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) bool is_mac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) bool synchronize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) struct ahash_alg template_ahash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) int hash_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) int hw_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) int inter_digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) struct cc_drvdata *drvdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) u32 min_hw_rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) enum cc_std_body std_body;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
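/*
 * Size of the flat state buffer produced by cc_hash_export() for an
 * intermediate digest of _x bytes. Conceptually the buffer is laid out
 * as if it were the following packed struct:
 *
 *   u32 magic;                          CC_EXPORT_MAGIC
 *   u8  digest[_x];                     intermediate digest
 *   u8  len[HASH_MAX_LEN_SIZE];         hash length counter
 *   u32 buf_cnt;                        bytes buffered below
 *   u8  buf[CC_MAX_HASH_BLCK_SIZE];     partial input block
 */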
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) #define CC_STATE_SIZE(_x) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) ((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) /* hash algorithm templates */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) static struct cc_hash_template driver_hash[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) /* Asynchronous hash templates */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) .name = "sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) .driver_name = "sha1-ccree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) .mac_name = "hmac(sha1)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) .mac_driver_name = "hmac-sha1-ccree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) .blocksize = SHA1_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) .is_mac = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) .synchronize = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) .template_ahash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) .init = cc_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) .update = cc_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) .final = cc_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) .finup = cc_hash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) .digest = cc_hash_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) .export = cc_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) .import = cc_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) .setkey = cc_hash_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) .digestsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) .hash_mode = DRV_HASH_SHA1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) .hw_mode = DRV_HASH_HW_SHA1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) .inter_digestsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) .min_hw_rev = CC_HW_REV_630,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) .std_body = CC_STD_NIST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) .name = "sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) .driver_name = "sha256-ccree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) .mac_name = "hmac(sha256)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) .mac_driver_name = "hmac-sha256-ccree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) .blocksize = SHA256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) .is_mac = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) .template_ahash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) .init = cc_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) .update = cc_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) .final = cc_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) .finup = cc_hash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) .digest = cc_hash_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) .export = cc_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) .import = cc_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) .setkey = cc_hash_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) .digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) .hash_mode = DRV_HASH_SHA256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) .hw_mode = DRV_HASH_HW_SHA256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) .inter_digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) .min_hw_rev = CC_HW_REV_630,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) .std_body = CC_STD_NIST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) .name = "sha224",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) .driver_name = "sha224-ccree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) .mac_name = "hmac(sha224)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) .mac_driver_name = "hmac-sha224-ccree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) .blocksize = SHA224_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) .is_mac = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) .template_ahash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) .init = cc_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) .update = cc_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) .final = cc_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) .finup = cc_hash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) .digest = cc_hash_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) .export = cc_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) .import = cc_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) .setkey = cc_hash_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) .digestsize = SHA224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) .hash_mode = DRV_HASH_SHA224,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) .hw_mode = DRV_HASH_HW_SHA256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) .inter_digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) .min_hw_rev = CC_HW_REV_630,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) .std_body = CC_STD_NIST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) .name = "sha384",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) .driver_name = "sha384-ccree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) .mac_name = "hmac(sha384)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) .mac_driver_name = "hmac-sha384-ccree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) .blocksize = SHA384_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) .is_mac = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) .template_ahash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) .init = cc_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) .update = cc_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) .final = cc_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) .finup = cc_hash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) .digest = cc_hash_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) .export = cc_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) .import = cc_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) .setkey = cc_hash_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) .digestsize = SHA384_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) .hash_mode = DRV_HASH_SHA384,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) .hw_mode = DRV_HASH_HW_SHA512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) .inter_digestsize = SHA512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) .min_hw_rev = CC_HW_REV_712,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) .std_body = CC_STD_NIST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) .name = "sha512",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) .driver_name = "sha512-ccree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) .mac_name = "hmac(sha512)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) .mac_driver_name = "hmac-sha512-ccree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) .blocksize = SHA512_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) .is_mac = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) .template_ahash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) .init = cc_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) .update = cc_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) .final = cc_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) .finup = cc_hash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) .digest = cc_hash_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) .export = cc_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) .import = cc_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) .setkey = cc_hash_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) .digestsize = SHA512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) .hash_mode = DRV_HASH_SHA512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) .hw_mode = DRV_HASH_HW_SHA512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) .inter_digestsize = SHA512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) .min_hw_rev = CC_HW_REV_712,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) .std_body = CC_STD_NIST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) .name = "md5",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) .driver_name = "md5-ccree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) .mac_name = "hmac(md5)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) .mac_driver_name = "hmac-md5-ccree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) .blocksize = MD5_HMAC_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) .is_mac = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) .template_ahash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) .init = cc_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) .update = cc_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) .final = cc_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) .finup = cc_hash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) .digest = cc_hash_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) .export = cc_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) .import = cc_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) .setkey = cc_hash_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) .digestsize = MD5_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) .hash_mode = DRV_HASH_MD5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) .hw_mode = DRV_HASH_HW_MD5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) .inter_digestsize = MD5_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) .min_hw_rev = CC_HW_REV_630,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) .std_body = CC_STD_NIST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) .name = "sm3",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) .driver_name = "sm3-ccree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) .blocksize = SM3_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) .is_mac = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) .template_ahash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) .init = cc_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) .update = cc_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) .final = cc_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) .finup = cc_hash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) .digest = cc_hash_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) .export = cc_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) .import = cc_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) .setkey = cc_hash_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) .digestsize = SM3_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) .statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) .hash_mode = DRV_HASH_SM3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) .hw_mode = DRV_HASH_HW_SM3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) .inter_digestsize = SM3_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) .min_hw_rev = CC_HW_REV_713,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) .std_body = CC_STD_OSCCA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) .mac_name = "xcbc(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) .mac_driver_name = "xcbc-aes-ccree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) .blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) .is_mac = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) .template_ahash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) .init = cc_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) .update = cc_mac_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) .final = cc_mac_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) .finup = cc_mac_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) .digest = cc_mac_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) .setkey = cc_xcbc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) .export = cc_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) .import = cc_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) .digestsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) .hash_mode = DRV_HASH_NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) .hw_mode = DRV_CIPHER_XCBC_MAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) .inter_digestsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) .min_hw_rev = CC_HW_REV_630,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) .std_body = CC_STD_NIST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) .mac_name = "cmac(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) .mac_driver_name = "cmac-aes-ccree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) .blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) .is_mac = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) .template_ahash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) .init = cc_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) .update = cc_mac_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) .final = cc_mac_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) .finup = cc_mac_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) .digest = cc_mac_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) .setkey = cc_cmac_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) .export = cc_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) .import = cc_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) .digestsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) .hash_mode = DRV_HASH_NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) .hw_mode = DRV_CIPHER_CMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) .inter_digestsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) .min_hw_rev = CC_HW_REV_630,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) .std_body = CC_STD_NIST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
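/*
 * cc_alloc_hash_alg() - Build a struct cc_hash_alg from a driver template.
 * When @keyed is true the MAC names from the template are used; otherwise
 * the plain hash names are used and ->setkey is cleared.
 */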
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) struct device *dev, bool keyed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) struct cc_hash_alg *t_crypto_alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) struct crypto_alg *alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) struct ahash_alg *halg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) t_crypto_alg = devm_kzalloc(dev, sizeof(*t_crypto_alg), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) if (!t_crypto_alg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) t_crypto_alg->ahash_alg = template->template_ahash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) halg = &t_crypto_alg->ahash_alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) alg = &halg->halg.base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (keyed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) template->mac_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) template->mac_driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) halg->setkey = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) template->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) template->driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) alg->cra_module = THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) alg->cra_priority = CC_CRA_PRIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) alg->cra_blocksize = template->blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) alg->cra_alignmask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) alg->cra_exit = cc_cra_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) alg->cra_init = cc_cra_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) t_crypto_alg->hash_mode = template->hash_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) t_crypto_alg->hw_mode = template->hw_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) t_crypto_alg->inter_digestsize = template->inter_digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) return t_crypto_alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
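/*
 * cc_init_copy_sram() - Copy a constant array into SRAM at *sram_buff_ofs
 * via an init-time descriptor sequence, advancing the offset on success.
 */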
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) unsigned int size, u32 *sram_buff_ofs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) unsigned int larval_seq_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) larval_seq, &larval_seq_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) rc = send_request_init(drvdata, larval_seq, larval_seq_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) *sram_buff_ofs += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
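/**
 * cc_init_hash_sram() - Load the digest-length constants and the larval
 * (initial) digests into SRAM
 *
 * @drvdata: Associated device driver context
 *
 * The copy order below is fixed and is mirrored by the offset arithmetic
 * in cc_larval_digest_addr() and cc_digest_len_addr().
 *
 * Return:
 * Zero on success, or a negative error code otherwise
 */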
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) int cc_init_hash_sram(struct cc_drvdata *drvdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) struct cc_hash_handle *hash_handle = drvdata->hash_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) u32 sram_buff_ofs = hash_handle->digest_len_sram_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) /* Copy-to-SRAM digest-len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) rc = cc_init_copy_sram(drvdata, cc_digest_len_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) sizeof(cc_digest_len_init), &sram_buff_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) goto init_digest_const_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (large_sha_supported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) /* Copy-to-SRAM digest-len for sha384/512 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) sizeof(cc_digest_len_sha512_init),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) &sram_buff_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) goto init_digest_const_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) /* The initial digests offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) hash_handle->larval_digest_sram_addr = sram_buff_ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) /* Copy-to-SRAM initial SHA* digests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) &sram_buff_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) goto init_digest_const_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) rc = cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) &sram_buff_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) goto init_digest_const_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) &sram_buff_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) goto init_digest_const_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) &sram_buff_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) goto init_digest_const_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) if (sm3_supported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) rc = cc_init_copy_sram(drvdata, cc_sm3_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) sizeof(cc_sm3_init), &sram_buff_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) goto init_digest_const_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) if (large_sha_supported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) rc = cc_init_copy_sram(drvdata, cc_sha384_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) sizeof(cc_sha384_init), &sram_buff_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) goto init_digest_const_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) rc = cc_init_copy_sram(drvdata, cc_sha512_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) sizeof(cc_sha512_init), &sram_buff_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) goto init_digest_const_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) init_digest_const_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
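/**
 * cc_hash_alloc() - Allocate the hash handle, load the SRAM constants and
 * register the hash and MAC algorithms supported by the hardware
 *
 * @drvdata: Associated device driver context
 *
 * Algorithms whose minimum HW revision or standards body does not match
 * the device are skipped; MAC-only templates (xcbc/cmac) register no
 * plain hash variant.
 *
 * Return:
 * Zero on success, or a negative error code otherwise
 */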
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) int cc_hash_alloc(struct cc_drvdata *drvdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) struct cc_hash_handle *hash_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) u32 sram_buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) u32 sram_size_to_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) struct device *dev = drvdata_to_dev(drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) int alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) hash_handle = devm_kzalloc(dev, sizeof(*hash_handle), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) if (!hash_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) INIT_LIST_HEAD(&hash_handle->hash_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) drvdata->hash_handle = hash_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) sram_size_to_alloc = sizeof(cc_digest_len_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) sizeof(cc_md5_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) sizeof(cc_sha1_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) sizeof(cc_sha224_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) sizeof(cc_sha256_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) if (drvdata->hw_rev >= CC_HW_REV_713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) sram_size_to_alloc += sizeof(cc_sm3_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (drvdata->hw_rev >= CC_HW_REV_712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) sizeof(cc_sha384_init) + sizeof(cc_sha512_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) if (sram_buff == NULL_SRAM_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) /* The initial digest-len offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) hash_handle->digest_len_sram_addr = sram_buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) /* Must be set before the alg registration as it is used there */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) rc = cc_init_hash_sram(drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) /* ahash registration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) struct cc_hash_alg *t_alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) int hw_mode = driver_hash[alg].hw_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) /* Check that the HW revision and variants are suitable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) !(drvdata->std_bodies & driver_hash[alg].std_body))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) if (driver_hash[alg].is_mac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) /* register hmac version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if (IS_ERR(t_alg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) rc = PTR_ERR(t_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) dev_err(dev, "%s alg allocation failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) driver_hash[alg].driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) t_alg->drvdata = drvdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) rc = crypto_register_ahash(&t_alg->ahash_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) dev_err(dev, "%s alg registration failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) driver_hash[alg].driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) list_add_tail(&t_alg->entry, &hash_handle->hash_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) if (hw_mode == DRV_CIPHER_XCBC_MAC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) hw_mode == DRV_CIPHER_CMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) /* register hash version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) if (IS_ERR(t_alg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) rc = PTR_ERR(t_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) dev_err(dev, "%s alg allocation failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) driver_hash[alg].driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) t_alg->drvdata = drvdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) rc = crypto_register_ahash(&t_alg->ahash_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) dev_err(dev, "%s alg registration failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) driver_hash[alg].driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) list_add_tail(&t_alg->entry, &hash_handle->hash_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) cc_hash_free(drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
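/**
 * cc_hash_free() - Unregister all previously registered ahash algorithms
 * and empty the hash list
 *
 * @drvdata: Associated device driver context
 *
 * Return:
 * Always zero
 */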
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) int cc_hash_free(struct cc_drvdata *drvdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) struct cc_hash_alg *t_hash_alg, *hash_n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) struct cc_hash_handle *hash_handle = drvdata->hash_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) crypto_unregister_ahash(&t_hash_alg->ahash_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) list_del(&t_hash_alg->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
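/*
 * cc_setup_xcbc() - Append the descriptors that load the three derived
 * XCBC-MAC keys (K1, K2, K3) and the current MAC state into the AES
 * engine, growing the sequence size accordingly.
 */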
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) unsigned int *seq_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) unsigned int idx = *seq_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) struct ahash_req_ctx *state = ahash_request_ctx(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) /* Setup XCBC MAC K1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) XCBC_MAC_K1_OFFSET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) CC_AES_128_BIT_KEY_SIZE, NS_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) set_flow_mode(&desc[idx], S_DIN_to_AES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) /* Setup XCBC MAC K2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) set_din_type(&desc[idx], DMA_DLLI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) CC_AES_128_BIT_KEY_SIZE, NS_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) set_flow_mode(&desc[idx], S_DIN_to_AES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) /* Setup XCBC MAC K3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) set_din_type(&desc[idx], DMA_DLLI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) CC_AES_128_BIT_KEY_SIZE, NS_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) set_flow_mode(&desc[idx], S_DIN_to_AES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) /* Loading MAC state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) CC_AES_BLOCK_SIZE, NS_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) set_flow_mode(&desc[idx], S_DIN_to_AES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) *seq_size = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
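/*
 * cc_setup_cmac() - Append the descriptors that load the CMAC key and the
 * current MAC state into the AES engine. For 192-bit keys the full
 * AES_MAX_KEY_SIZE buffer is DMAed (the key is presumably stored
 * zero-padded by setkey), while the programmed key size keeps the real
 * length.
 */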
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) unsigned int *seq_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) unsigned int idx = *seq_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) struct ahash_req_ctx *state = ahash_request_ctx(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) /* Setup CMAC Key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) ctx->key_params.keylen), NS_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) set_key_size_aes(&desc[idx], ctx->key_params.keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) set_flow_mode(&desc[idx], S_DIN_to_AES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) /* Load MAC state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) CC_AES_BLOCK_SIZE, NS_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) set_key_size_aes(&desc[idx], ctx->key_params.keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) set_flow_mode(&desc[idx], S_DIN_to_AES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) *seq_size = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
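/*
 * cc_set_desc() - Append the data-processing descriptors for the request:
 * a single DLLI descriptor for contiguous data, or a BYPASS copy of the
 * MLLI table into SRAM followed by an MLLI processing descriptor for
 * scattered data. A NULL data buffer type produces no descriptors.
 */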
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) struct cc_hash_ctx *ctx, unsigned int flow_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) struct cc_hw_desc desc[], bool is_not_last_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) unsigned int *seq_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) unsigned int idx = *seq_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) struct device *dev = drvdata_to_dev(ctx->drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) set_din_type(&desc[idx], DMA_DLLI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) sg_dma_address(areq_ctx->curr_sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) areq_ctx->curr_sg->length, NS_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) set_flow_mode(&desc[idx], flow_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) dev_dbg(dev, "NULL mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) /* nothing to build */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) /* bypass */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) set_din_type(&desc[idx], DMA_DLLI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) areq_ctx->mlli_params.mlli_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) areq_ctx->mlli_params.mlli_len, NS_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) areq_ctx->mlli_params.mlli_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) set_flow_mode(&desc[idx], BYPASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) /* process */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) hw_desc_init(&desc[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) set_din_type(&desc[idx], DMA_MLLI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) ctx->drvdata->mlli_sram_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) areq_ctx->mlli_nents, NS_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) set_flow_mode(&desc[idx], flow_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) if (is_not_last_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) set_din_not_last_indication(&desc[(idx - 1)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) /* return updated desc sequence size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) *seq_size = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
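/*
 * cc_larval_digest() - Return the larval (initial) digest constants for
 * the given hash mode, falling back to the MD5 constants (with an error
 * log) on an invalid mode so callers never get a NULL pointer.
 */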
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) static const void *cc_larval_digest(struct device *dev, u32 mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) switch (mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) case DRV_HASH_MD5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) return cc_md5_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) case DRV_HASH_SHA1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) return cc_sha1_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) case DRV_HASH_SHA224:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) return cc_sha224_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) case DRV_HASH_SHA256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) return cc_sha256_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) case DRV_HASH_SHA384:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) return cc_sha384_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) case DRV_HASH_SHA512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) return cc_sha512_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) case DRV_HASH_SM3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) return cc_sm3_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) dev_err(dev, "Invalid hash mode (%d)\n", mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) return cc_md5_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) * cc_larval_digest_addr() - Get the address of the initial digest in SRAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) * according to the given hash mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) * @drvdata: Associated device driver context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) * @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SM3/SHA384/SHA512
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) * The address of the initial digest in SRAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) u32 cc_larval_digest_addr(void *drvdata, u32 mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) struct device *dev = drvdata_to_dev(_drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) u32 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) switch (mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) case DRV_HASH_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) break; /* Ignore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) case DRV_HASH_MD5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) return (hash_handle->larval_digest_sram_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) case DRV_HASH_SHA1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) return (hash_handle->larval_digest_sram_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) sizeof(cc_md5_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) case DRV_HASH_SHA224:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) return (hash_handle->larval_digest_sram_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) sizeof(cc_md5_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) sizeof(cc_sha1_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) case DRV_HASH_SHA256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) return (hash_handle->larval_digest_sram_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) sizeof(cc_md5_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) sizeof(cc_sha1_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) sizeof(cc_sha224_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) case DRV_HASH_SM3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) return (hash_handle->larval_digest_sram_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) sizeof(cc_md5_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) sizeof(cc_sha1_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) sizeof(cc_sha224_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) sizeof(cc_sha256_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) case DRV_HASH_SHA384:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) addr = (hash_handle->larval_digest_sram_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) sizeof(cc_md5_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) sizeof(cc_sha1_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) sizeof(cc_sha224_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) sizeof(cc_sha256_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) if (sm3_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) addr += sizeof(cc_sm3_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) case DRV_HASH_SHA512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) addr = (hash_handle->larval_digest_sram_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) sizeof(cc_md5_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) sizeof(cc_sha1_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) sizeof(cc_sha224_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) sizeof(cc_sha256_init) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) sizeof(cc_sha384_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) if (sm3_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) addr += sizeof(cc_sm3_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) dev_err(dev, "Invalid hash mode (%d)\n", mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) /* A valid but wrong value, returned to avoid a kernel crash */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) return hash_handle->larval_digest_sram_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
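/*
 * cc_digest_len_addr() - Return the SRAM address of the initial
 * digest-length constant for the given hash mode. SHA384/SHA512 use the
 * sha512 variant stored immediately after the default one; unknown modes
 * fall back to the default address.
 */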
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) u32 cc_digest_len_addr(void *drvdata, u32 mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) u32 digest_len_addr = hash_handle->digest_len_sram_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) switch (mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) case DRV_HASH_SHA1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) case DRV_HASH_SHA224:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) case DRV_HASH_SHA256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) case DRV_HASH_MD5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) return digest_len_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) case DRV_HASH_SHA384:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) case DRV_HASH_SHA512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) return digest_len_addr + sizeof(cc_digest_len_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) return digest_len_addr; /*to avoid kernel crash*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) }