^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
 * This file is part of the STM32 Crypto driver for Linux.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/crypto.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/iopoll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/reset.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <crypto/engine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <crypto/hash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <crypto/md5.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <crypto/scatterwalk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <crypto/sha.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <crypto/internal/hash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define HASH_CR 0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define HASH_DIN 0x04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define HASH_STR 0x08
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define HASH_IMR 0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define HASH_SR 0x24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define HASH_CSR(x) (0x0F8 + ((x) * 0x04))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define HASH_HREG(x) (0x310 + ((x) * 0x04))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define HASH_HWCFGR 0x3F0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define HASH_VER 0x3F4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define HASH_ID 0x3F8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) /* Control Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define HASH_CR_INIT BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define HASH_CR_DMAE BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define HASH_CR_DATATYPE_POS 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define HASH_CR_MODE BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define HASH_CR_MDMAT BIT(13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define HASH_CR_DMAA BIT(14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define HASH_CR_LKEY BIT(16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define HASH_CR_ALGO_SHA1 0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define HASH_CR_ALGO_MD5 0x80
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define HASH_CR_ALGO_SHA224 0x40000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define HASH_CR_ALGO_SHA256 0x40080
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) /* Interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define HASH_DINIE BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define HASH_DCIE BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) /* Interrupt Mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define HASH_MASK_CALC_COMPLETION BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define HASH_MASK_DATA_INPUT BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) /* Context swap register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define HASH_CSR_REGISTER_NUMBER 53
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) /* Status Flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define HASH_SR_DATA_INPUT_READY BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define HASH_SR_OUTPUT_READY BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define HASH_SR_DMA_ACTIVE BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define HASH_SR_BUSY BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) /* STR Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #define HASH_STR_NBLW_MASK GENMASK(4, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define HASH_STR_DCAL BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #define HASH_FLAGS_INIT BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #define HASH_FLAGS_OUTPUT_READY BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #define HASH_FLAGS_CPU BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define HASH_FLAGS_DMA_READY BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define HASH_FLAGS_DMA_ACTIVE BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define HASH_FLAGS_HMAC_INIT BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #define HASH_FLAGS_HMAC_FINAL BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #define HASH_FLAGS_HMAC_KEY BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #define HASH_FLAGS_FINAL BIT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #define HASH_FLAGS_FINUP BIT(16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define HASH_FLAGS_ALGO_MASK GENMASK(21, 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #define HASH_FLAGS_MD5 BIT(18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #define HASH_FLAGS_SHA1 BIT(19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) #define HASH_FLAGS_SHA224 BIT(20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #define HASH_FLAGS_SHA256 BIT(21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) #define HASH_FLAGS_ERRORS BIT(22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) #define HASH_FLAGS_HMAC BIT(23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #define HASH_OP_UPDATE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #define HASH_OP_FINAL 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) enum stm32_hash_data_format {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) HASH_DATA_32_BITS = 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) HASH_DATA_16_BITS = 0x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) HASH_DATA_8_BITS = 0x2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) HASH_DATA_1_BIT = 0x3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define HASH_BUFLEN 256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #define HASH_LONG_KEY 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #define HASH_MAX_KEY_SIZE (SHA256_BLOCK_SIZE * 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #define HASH_QUEUE_LENGTH 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define HASH_DMA_THRESHOLD 50
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) #define HASH_AUTOSUSPEND_DELAY 50
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) struct stm32_hash_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) struct crypto_engine_ctx enginectx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) struct stm32_hash_dev *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) u8 key[HASH_MAX_KEY_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) int keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) struct stm32_hash_request_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) struct stm32_hash_dev *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) unsigned long op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) size_t digcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) size_t bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) size_t buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) /* DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) unsigned int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) unsigned int total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) struct scatterlist sg_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) size_t dma_ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) int nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) u8 data_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) /* Export Context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) u32 *hw_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) struct stm32_hash_algs_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) struct ahash_alg *algs_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) struct stm32_hash_pdata {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) struct stm32_hash_algs_info *algs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) size_t algs_info_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) struct stm32_hash_dev {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) struct reset_control *rst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) void __iomem *io_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) phys_addr_t phys_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) u32 dma_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) u32 dma_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) struct ahash_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) struct crypto_engine *engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) struct dma_chan *dma_lch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) struct completion dma_completion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) const struct stm32_hash_pdata *pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) struct stm32_hash_drv {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) struct list_head dev_list;
	spinlock_t lock; /* protects dev_list access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) static struct stm32_hash_drv stm32_hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) .dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) .lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) static void stm32_hash_dma_callback(void *param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) return readl_relaxed(hdev->io_base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) u32 offset, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) writel_relaxed(value, hdev->io_base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) !(status & HASH_SR_BUSY), 10, 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
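/*
 * Program NBLW in HASH_STR: the number of valid bits, 8 * (length % 4),
 * in the last 32-bit word written to HASH_DIN.
 */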
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) reg = stm32_hash_read(hdev, HASH_STR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) reg &= ~(HASH_STR_NBLW_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) reg |= (8U * ((length) % 4U));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) stm32_hash_write(hdev, HASH_STR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
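/*
 * Feed the HMAC key to HASH_DIN one 32-bit word at a time and trigger the
 * key digest with DCAL. Returns -EINPROGRESS while the key is being
 * processed, 0 when no key is set.
 */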
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) int keylen = ctx->keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) void *key = ctx->key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) if (keylen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) stm32_hash_set_nblw(hdev, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) while (keylen > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) keylen -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) key += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) reg = stm32_hash_read(hdev, HASH_STR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) reg |= HASH_STR_DCAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) stm32_hash_write(hdev, HASH_STR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
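/*
 * Program HASH_CR for the current request: algorithm, data type and HMAC
 * mode (with LKEY for keys longer than 64 bytes), then start a new digest
 * with HASH_CR_INIT. Only done once per request (HASH_FLAGS_INIT).
 */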
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) u32 reg = HASH_CR_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) if (!(hdev->flags & HASH_FLAGS_INIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) case HASH_FLAGS_MD5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) reg |= HASH_CR_ALGO_MD5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) case HASH_FLAGS_SHA1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) reg |= HASH_CR_ALGO_SHA1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) case HASH_FLAGS_SHA224:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) reg |= HASH_CR_ALGO_SHA224;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) case HASH_FLAGS_SHA256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) reg |= HASH_CR_ALGO_SHA256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) reg |= HASH_CR_ALGO_MD5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) if (rctx->flags & HASH_FLAGS_HMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) hdev->flags |= HASH_FLAGS_HMAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) reg |= HASH_CR_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) if (ctx->keylen > HASH_LONG_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) reg |= HASH_CR_LKEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) stm32_hash_write(hdev, HASH_CR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) hdev->flags |= HASH_FLAGS_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) dev_dbg(hdev->dev, "Write Control %x\n", reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
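/*
 * Copy data from the request scatterlist into the internal bounce buffer
 * until the buffer is full or no input remains.
 */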
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) size_t count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) count = min(rctx->sg->length - rctx->offset, rctx->total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) count = min(count, rctx->buflen - rctx->bufcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) if (count <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) rctx->sg = sg_next(rctx->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) rctx->offset, count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) rctx->bufcnt += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) rctx->offset += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) rctx->total -= count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) if (rctx->offset == rctx->sg->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) rctx->sg = sg_next(rctx->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) if (rctx->sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) rctx->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) rctx->total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)
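/*
 * Push a buffer to HASH_DIN using CPU accesses. For HMAC the key is written
 * before the data; when 'final' is set, NBLW is programmed and the digest
 * computation is started with DCAL.
 */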
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) const u8 *buf, size_t length, int final)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) unsigned int count, len32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) const u32 *buffer = (const u32 *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) if (final)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) hdev->flags |= HASH_FLAGS_FINAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) len32 = DIV_ROUND_UP(length, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) __func__, length, final, len32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) hdev->flags |= HASH_FLAGS_CPU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) stm32_hash_write_ctrl(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) if (stm32_hash_wait_busy(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) if ((hdev->flags & HASH_FLAGS_HMAC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) hdev->flags |= HASH_FLAGS_HMAC_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) stm32_hash_write_key(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) if (stm32_hash_wait_busy(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) for (count = 0; count < len32; count++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) stm32_hash_write(hdev, HASH_DIN, buffer[count]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) if (final) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) stm32_hash_set_nblw(hdev, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) reg = stm32_hash_read(hdev, HASH_STR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) reg |= HASH_STR_DCAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) stm32_hash_write(hdev, HASH_STR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) if (hdev->flags & HASH_FLAGS_HMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) if (stm32_hash_wait_busy(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) stm32_hash_write_key(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
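/*
 * CPU mode update: fill the bounce buffer from the scatterlist and flush it
 * to the hardware whenever a full buffer is available; on finup the
 * remaining bytes are sent as the final block.
 */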
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) int bufcnt, err = 0, final;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) final = (rctx->flags & HASH_FLAGS_FINUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) while ((rctx->total >= rctx->buflen) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) (rctx->bufcnt + rctx->total >= rctx->buflen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) stm32_hash_append_sg(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) bufcnt = rctx->bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) rctx->bufcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) stm32_hash_append_sg(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) if (final) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) bufcnt = rctx->bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) rctx->bufcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) (rctx->flags & HASH_FLAGS_FINUP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
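/*
 * Send one scatterlist entry to HASH_DIN through the DMA channel. MDMAT is
 * set for intermediate transfers, and completion is waited for with a
 * 100 ms timeout before the cookie state is checked.
 */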
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) struct scatterlist *sg, int length, int mdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) struct dma_async_tx_descriptor *in_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) if (!in_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) dev_err(hdev->dev, "dmaengine_prep_slave error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) reinit_completion(&hdev->dma_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) in_desc->callback = stm32_hash_dma_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) in_desc->callback_param = hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) hdev->flags |= HASH_FLAGS_FINAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) reg = stm32_hash_read(hdev, HASH_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) if (mdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) reg |= HASH_CR_MDMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) reg &= ~HASH_CR_MDMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) reg |= HASH_CR_DMAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) stm32_hash_write(hdev, HASH_CR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) stm32_hash_set_nblw(hdev, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) cookie = dmaengine_submit(in_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) err = dma_submit_error(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) dma_async_issue_pending(hdev->dma_lch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) if (!wait_for_completion_timeout(&hdev->dma_completion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) msecs_to_jiffies(100)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) err = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) NULL, NULL) != DMA_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) err = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) dev_err(hdev->dev, "DMA Error %i\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) dmaengine_terminate_all(hdev->dma_lch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) static void stm32_hash_dma_callback(void *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) struct stm32_hash_dev *hdev = param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) complete(&hdev->dma_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) hdev->flags |= HASH_FLAGS_DMA_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
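/*
 * Send the HMAC key: short keys (or MDMA mode) are written by the CPU,
 * otherwise the key is DMA mapped and pushed through the DMA channel.
 */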
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) err = stm32_hash_write_key(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) if (stm32_hash_wait_busy(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) sg_init_one(&rctx->sg_key, ctx->key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) ALIGN(ctx->keylen, sizeof(u32)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) if (rctx->dma_ct == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) dev_err(hdev->dev, "dma_map_sg error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
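/*
 * Request and configure the "in" DMA channel used to feed HASH_DIN with
 * 32-bit wide transfers.
 */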
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) struct dma_slave_config dma_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) memset(&dma_conf, 0, sizeof(dma_conf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) dma_conf.direction = DMA_MEM_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) dma_conf.src_maxburst = hdev->dma_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) dma_conf.dst_maxburst = hdev->dma_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) dma_conf.device_fc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) chan = dma_request_chan(hdev->dev, "in");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) if (IS_ERR(chan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) return PTR_ERR(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) hdev->dma_lch = chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) dma_release_channel(hdev->dma_lch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) hdev->dma_lch = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) init_completion(&hdev->dma_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)
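/*
 * DMA mode processing: walk the request scatterlist and send each entry to
 * the hardware. In MDMA mode (dma_mode == 1) the unaligned tail is copied
 * into the bounce buffer and written by the CPU before DCAL is set.
 */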
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) struct scatterlist sg[1], *tsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) int err = 0, len = 0, reg, ncp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) u32 *buffer = (void *)rctx->buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) rctx->sg = hdev->req->src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) rctx->total = hdev->req->nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) rctx->nents = sg_nents(rctx->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) if (rctx->nents < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) stm32_hash_write_ctrl(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) if (hdev->flags & HASH_FLAGS_HMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) err = stm32_hash_hmac_dma_send(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) if (err != -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) for_each_sg(rctx->sg, tsg, rctx->nents, i) {
		sg[0] = *tsg;
		len = sg->length;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) if (sg_is_last(sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) if (hdev->dma_mode == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) len = (ALIGN(sg->length, 16) - 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) ncp = sg_pcopy_to_buffer(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) rctx->sg, rctx->nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) rctx->buffer, sg->length - len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) rctx->total - sg->length + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) sg->length = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) len = sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) sg->length = ALIGN(sg->length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) if (rctx->dma_ct == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) dev_err(hdev->dev, "dma_map_sg error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) err = stm32_hash_xmit_dma(hdev, sg, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) !sg_is_last(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) if (err == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) if (hdev->dma_mode == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) if (stm32_hash_wait_busy(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) reg = stm32_hash_read(hdev, HASH_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) reg &= ~HASH_CR_DMAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) reg |= HASH_CR_DMAA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) stm32_hash_write(hdev, HASH_CR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) if (ncp) {
			/* Zero-pad the tail up to the next 32-bit word */
			memset(rctx->buffer + ncp, 0,
			       ALIGN(ncp, sizeof(u32)) - ncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) writesl(hdev->io_base + HASH_DIN, buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) DIV_ROUND_UP(ncp, sizeof(u32)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) stm32_hash_set_nblw(hdev, ncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) reg = stm32_hash_read(hdev, HASH_STR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) reg |= HASH_STR_DCAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) stm32_hash_write(hdev, HASH_STR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) err = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) if (hdev->flags & HASH_FLAGS_HMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) if (stm32_hash_wait_busy(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) err = stm32_hash_hmac_dma_send(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
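/* Bind the tfm context to the first registered HASH device. */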
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) struct stm32_hash_dev *hdev = NULL, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) spin_lock_bh(&stm32_hash.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) if (!ctx->hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) hdev = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) ctx->hdev = hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) hdev = ctx->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) spin_unlock_bh(&stm32_hash.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) return hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
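/*
 * DMA is only used for requests above HASH_DMA_THRESHOLD whose scatterlist
 * entries and offset are 32-bit aligned (single entry only in MDMA mode).
 */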
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) if (req->nbytes <= HASH_DMA_THRESHOLD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) if (sg_nents(req->src) > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) if (hdev->dma_mode == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) for_each_sg(req->src, sg, sg_nents(req->src), i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) (!sg_is_last(sg)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) if (req->src->offset % 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
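/*
 * Initialize the request context: select the algorithm flag from the
 * digest size and reset the bounce buffer and counters.
 */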
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) static int stm32_hash_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) rctx->hdev = hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) rctx->flags = HASH_FLAGS_CPU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) rctx->digcnt = crypto_ahash_digestsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) switch (rctx->digcnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) case MD5_DIGEST_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) rctx->flags |= HASH_FLAGS_MD5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) case SHA1_DIGEST_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) rctx->flags |= HASH_FLAGS_SHA1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) case SHA224_DIGEST_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) rctx->flags |= HASH_FLAGS_SHA224;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) case SHA256_DIGEST_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) rctx->flags |= HASH_FLAGS_SHA256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) rctx->bufcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) rctx->buflen = HASH_BUFLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) rctx->total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) rctx->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) rctx->data_type = HASH_DATA_8_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) memset(rctx->buffer, 0, HASH_BUFLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) if (ctx->flags & HASH_FLAGS_HMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) rctx->flags |= HASH_FLAGS_HMAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) return stm32_hash_update_cpu(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) struct ahash_request *req = hdev->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) int buflen = rctx->bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) rctx->bufcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) if (!(rctx->flags & HASH_FLAGS_CPU))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) err = stm32_hash_dma_send(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
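/* Read the digest from the HASH_HREG registers, converting to big endian. */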
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) static void stm32_hash_copy_hash(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) __be32 *hash = (void *)rctx->digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) unsigned int i, hashsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) case HASH_FLAGS_MD5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) hashsize = MD5_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) case HASH_FLAGS_SHA1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) hashsize = SHA1_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) case HASH_FLAGS_SHA224:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) hashsize = SHA224_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) case HASH_FLAGS_SHA256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) hashsize = SHA256_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) for (i = 0; i < hashsize / sizeof(u32); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) hash[i] = cpu_to_be32(stm32_hash_read(rctx->hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) HASH_HREG(i)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) static int stm32_hash_finish(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (!req->result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) memcpy(req->result, rctx->digest, rctx->digcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
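/*
 * Complete a request: copy out the digest on success, clear the device
 * flags, release the runtime PM reference and notify the crypto engine.
 */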
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) static void stm32_hash_finish_req(struct ahash_request *req, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) struct stm32_hash_dev *hdev = rctx->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) stm32_hash_copy_hash(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) err = stm32_hash_finish(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) HASH_FLAGS_HMAC_KEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) rctx->flags |= HASH_FLAGS_ERRORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) pm_runtime_mark_last_busy(hdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) pm_runtime_put_autosuspend(hdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) crypto_finalize_hash_request(hdev->engine, req, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
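/*
 * Take a runtime PM reference and reset the HASH registers unless a
 * computation is already in progress (HASH_FLAGS_INIT).
 */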
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) struct stm32_hash_request_ctx *rctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) pm_runtime_get_sync(hdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (!(HASH_FLAGS_INIT & hdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) stm32_hash_write(hdev, HASH_STR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) stm32_hash_write(hdev, HASH_DIN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) stm32_hash_write(hdev, HASH_IMR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) hdev->err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
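/* Hand the request over to the crypto engine queue. */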
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return crypto_transfer_hash_request_to_engine(hdev->engine, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct ahash_request *req = container_of(areq, struct ahash_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) struct stm32_hash_request_ctx *rctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (!hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) hdev->req = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) rctx->op, req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return stm32_hash_hw_init(hdev, rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
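/*
 * crypto_engine do_one_request callback: run the queued update or final
 * operation and, unless it continues asynchronously (-EINPROGRESS),
 * complete the request immediately.
 */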
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) struct ahash_request *req = container_of(areq, struct ahash_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) struct stm32_hash_request_ctx *rctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (!hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) hdev->req = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (rctx->op == HASH_OP_UPDATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) err = stm32_hash_update_req(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) else if (rctx->op == HASH_OP_FINAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) err = stm32_hash_final_req(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (err != -EINPROGRESS)
		/* no completion will follow, so finish the request here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) stm32_hash_finish_req(req, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) struct stm32_hash_dev *hdev = ctx->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) rctx->op = op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return stm32_hash_handle_queue(hdev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
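/*
 * update(): only the CPU path consumes data here. Data that still fits in
 * the internal buffer is simply appended; otherwise a hardware update is
 * enqueued on the crypto engine.
 */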
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) static int stm32_hash_update(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) rctx->total = req->nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) rctx->sg = req->src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) rctx->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
	if (rctx->bufcnt + rctx->total < rctx->buflen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) stm32_hash_append_sg(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return stm32_hash_enqueue(req, HASH_OP_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) static int stm32_hash_final(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) rctx->flags |= HASH_FLAGS_FINUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return stm32_hash_enqueue(req, HASH_OP_FINAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
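/*
 * finup(): process the remaining data and compute the digest in one call.
 * When a DMA channel is available and the data layout is DMA friendly,
 * the request is switched away from the CPU path first.
 */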
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) static int stm32_hash_finup(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) int err1, err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) rctx->flags |= HASH_FLAGS_FINUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) rctx->flags &= ~HASH_FLAGS_CPU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) err1 = stm32_hash_update(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (err1 == -EINPROGRESS || err1 == -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
	/*
	 * final() must always be called to clean up resources, even if
	 * update() failed, unless the request is still in progress.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) err2 = stm32_hash_final(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) return err1 ?: err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) static int stm32_hash_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return stm32_hash_init(req) ?: stm32_hash_finup(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
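/*
 * export(): save the request state together with the hardware context
 * (IMR, STR, CR and the CSR registers) so that the operation can be
 * resumed later through import().
 */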
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) static int stm32_hash_export(struct ahash_request *req, void *out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) u32 *preg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) pm_runtime_get_sync(hdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
	while (stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY)
		cpu_relax();

	rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
					 sizeof(u32),
					 GFP_KERNEL);
	if (!rctx->hw_context) {
		pm_runtime_mark_last_busy(hdev->dev);
		pm_runtime_put_autosuspend(hdev->dev);
		return -ENOMEM;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) preg = rctx->hw_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) *preg++ = stm32_hash_read(hdev, HASH_IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) *preg++ = stm32_hash_read(hdev, HASH_STR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) *preg++ = stm32_hash_read(hdev, HASH_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) pm_runtime_mark_last_busy(hdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) pm_runtime_put_autosuspend(hdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) memcpy(out, rctx, sizeof(*rctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
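/*
 * import(): restore a previously exported request state and reload the
 * saved hardware context into the peripheral.
 */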
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) static int stm32_hash_import(struct ahash_request *req, const void *in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) const u32 *preg = in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) memcpy(rctx, in, sizeof(*rctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) preg = rctx->hw_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) pm_runtime_get_sync(hdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) stm32_hash_write(hdev, HASH_IMR, *preg++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) stm32_hash_write(hdev, HASH_STR, *preg++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) stm32_hash_write(hdev, HASH_CR, *preg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) reg = *preg++ | HASH_CR_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) stm32_hash_write(hdev, HASH_CR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) stm32_hash_write(hdev, HASH_CSR(i), *preg++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) pm_runtime_mark_last_busy(hdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) pm_runtime_put_autosuspend(hdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) kfree(rctx->hw_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
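/* Store the HMAC key in the transform context for later use. */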
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) static int stm32_hash_setkey(struct crypto_ahash *tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) const u8 *key, unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
	if (keylen > HASH_MAX_KEY_SIZE)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
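/*
 * Common transform initialization: set the request context size, mark
 * HMAC transforms and install the crypto engine callbacks.
 */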
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) const char *algs_hmac_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) sizeof(struct stm32_hash_request_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) ctx->keylen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (algs_hmac_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) ctx->flags |= HASH_FLAGS_HMAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) ctx->enginectx.op.do_one_request = stm32_hash_one_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) ctx->enginectx.op.unprepare_request = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) static int stm32_hash_cra_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return stm32_hash_cra_init_algs(tfm, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) return stm32_hash_cra_init_algs(tfm, "md5");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) return stm32_hash_cra_init_algs(tfm, "sha1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return stm32_hash_cra_init_algs(tfm, "sha224");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) return stm32_hash_cra_init_algs(tfm, "sha256");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
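/*
 * Threaded half of the interrupt handler: complete the current request
 * once the top half has reported that the digest or DMA transfer is done.
 */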
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) struct stm32_hash_dev *hdev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (HASH_FLAGS_CPU & hdev->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) } else if (HASH_FLAGS_DMA_READY & hdev->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /* Finish current request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) stm32_hash_finish_req(hdev->req, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
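/*
 * Top half of the interrupt handler: acknowledge the "digest ready"
 * status, mask further interrupts and defer completion to the threaded
 * handler.
 */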
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) struct stm32_hash_dev *hdev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) reg = stm32_hash_read(hdev, HASH_SR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (reg & HASH_SR_OUTPUT_READY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) reg &= ~HASH_SR_OUTPUT_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) stm32_hash_write(hdev, HASH_SR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		/* Disable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) stm32_hash_write(hdev, HASH_IMR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) return IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) static struct ahash_alg algs_md5_sha1[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) .init = stm32_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) .update = stm32_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) .final = stm32_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) .finup = stm32_hash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) .digest = stm32_hash_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) .export = stm32_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) .import = stm32_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) .digestsize = MD5_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) .statesize = sizeof(struct stm32_hash_request_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) .cra_name = "md5",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) .cra_driver_name = "stm32-md5",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) .cra_priority = 200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) .cra_ctxsize = sizeof(struct stm32_hash_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) .cra_alignmask = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) .cra_init = stm32_hash_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) .init = stm32_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) .update = stm32_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) .final = stm32_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) .finup = stm32_hash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) .digest = stm32_hash_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) .export = stm32_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) .import = stm32_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) .setkey = stm32_hash_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) .digestsize = MD5_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) .statesize = sizeof(struct stm32_hash_request_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) .cra_name = "hmac(md5)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) .cra_driver_name = "stm32-hmac-md5",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) .cra_priority = 200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) .cra_ctxsize = sizeof(struct stm32_hash_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) .cra_alignmask = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) .cra_init = stm32_hash_cra_md5_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) .init = stm32_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) .update = stm32_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) .final = stm32_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) .finup = stm32_hash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) .digest = stm32_hash_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) .export = stm32_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) .import = stm32_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) .digestsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) .statesize = sizeof(struct stm32_hash_request_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) .cra_name = "sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) .cra_driver_name = "stm32-sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) .cra_priority = 200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) .cra_blocksize = SHA1_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) .cra_ctxsize = sizeof(struct stm32_hash_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) .cra_alignmask = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) .cra_init = stm32_hash_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) .init = stm32_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) .update = stm32_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) .final = stm32_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) .finup = stm32_hash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) .digest = stm32_hash_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) .export = stm32_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) .import = stm32_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) .setkey = stm32_hash_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) .digestsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) .statesize = sizeof(struct stm32_hash_request_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) .cra_name = "hmac(sha1)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) .cra_driver_name = "stm32-hmac-sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) .cra_priority = 200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) .cra_blocksize = SHA1_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) .cra_ctxsize = sizeof(struct stm32_hash_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) .cra_alignmask = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) .cra_init = stm32_hash_cra_sha1_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) static struct ahash_alg algs_sha224_sha256[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) .init = stm32_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) .update = stm32_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) .final = stm32_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) .finup = stm32_hash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) .digest = stm32_hash_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) .export = stm32_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) .import = stm32_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) .digestsize = SHA224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) .statesize = sizeof(struct stm32_hash_request_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) .cra_name = "sha224",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) .cra_driver_name = "stm32-sha224",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) .cra_priority = 200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) .cra_blocksize = SHA224_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) .cra_ctxsize = sizeof(struct stm32_hash_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) .cra_alignmask = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) .cra_init = stm32_hash_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) .init = stm32_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) .update = stm32_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) .final = stm32_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) .finup = stm32_hash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) .digest = stm32_hash_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) .setkey = stm32_hash_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) .export = stm32_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) .import = stm32_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) .digestsize = SHA224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) .statesize = sizeof(struct stm32_hash_request_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) .cra_name = "hmac(sha224)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) .cra_driver_name = "stm32-hmac-sha224",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) .cra_priority = 200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) .cra_blocksize = SHA224_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) .cra_ctxsize = sizeof(struct stm32_hash_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) .cra_alignmask = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) .cra_init = stm32_hash_cra_sha224_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) .init = stm32_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) .update = stm32_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) .final = stm32_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) .finup = stm32_hash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) .digest = stm32_hash_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) .export = stm32_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) .import = stm32_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) .digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) .statesize = sizeof(struct stm32_hash_request_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) .cra_name = "sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) .cra_driver_name = "stm32-sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) .cra_priority = 200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) .cra_blocksize = SHA256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) .cra_ctxsize = sizeof(struct stm32_hash_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) .cra_alignmask = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) .cra_init = stm32_hash_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) .init = stm32_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) .update = stm32_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) .final = stm32_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) .finup = stm32_hash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) .digest = stm32_hash_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) .export = stm32_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) .import = stm32_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) .setkey = stm32_hash_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) .digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) .statesize = sizeof(struct stm32_hash_request_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) .cra_name = "hmac(sha256)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) .cra_driver_name = "stm32-hmac-sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) .cra_priority = 200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) .cra_blocksize = SHA256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) .cra_ctxsize = sizeof(struct stm32_hash_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) .cra_alignmask = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) .cra_init = stm32_hash_cra_sha256_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
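/*
 * Register all ahash algorithms advertised by the platform data, rolling
 * back the ones already registered if any registration fails.
 */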
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) unsigned int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) for (i = 0; i < hdev->pdata->algs_info_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) err = crypto_register_ahash(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) &hdev->pdata->algs_info[i].algs_list[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) goto err_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) err_algs:
	dev_err(hdev->dev, "Failed to register algo %d:%d\n", i, j);
	/* Unregister everything that was registered before the failure */
	for (; j--;)
		crypto_unregister_ahash(
			&hdev->pdata->algs_info[i].algs_list[j]);
	while (i--) {
		for (j = hdev->pdata->algs_info[i].size; j--;)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) unsigned int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) for (i = 0; i < hdev->pdata->algs_info_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) crypto_unregister_ahash(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) &hdev->pdata->algs_info[i].algs_list[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) .algs_list = algs_md5_sha1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) .size = ARRAY_SIZE(algs_md5_sha1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) .algs_info = stm32_hash_algs_info_stm32f4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) .algs_list = algs_md5_sha1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) .size = ARRAY_SIZE(algs_md5_sha1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) .algs_list = algs_sha224_sha256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) .size = ARRAY_SIZE(algs_sha224_sha256),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) .algs_info = stm32_hash_algs_info_stm32f7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static const struct of_device_id stm32_hash_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) .compatible = "st,stm32f456-hash",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) .data = &stm32_hash_pdata_stm32f4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) .compatible = "st,stm32f756-hash",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) .data = &stm32_hash_pdata_stm32f7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) hdev->pdata = of_device_get_match_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (!hdev->pdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) dev_err(dev, "no compatible OF match\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (of_property_read_u32(dev->of_node, "dma-maxburst",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) &hdev->dma_maxburst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) dev_info(dev, "dma-maxburst not specified, using 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) hdev->dma_maxburst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
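/*
 * Probe: map the registers, request the interrupt, enable the clock and
 * runtime PM, optionally reset the block and set up DMA, then start a
 * crypto engine and register the supported algorithms.
 */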
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) static int stm32_hash_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) struct stm32_hash_dev *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) int ret, irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (!hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) hdev->io_base = devm_ioremap_resource(dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if (IS_ERR(hdev->io_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return PTR_ERR(hdev->io_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) hdev->phys_base = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) ret = stm32_hash_get_of_match(hdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) stm32_hash_irq_thread, IRQF_ONESHOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) dev_name(dev), hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) dev_err(dev, "Cannot grab IRQ\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) hdev->clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (IS_ERR(hdev->clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) return dev_err_probe(dev, PTR_ERR(hdev->clk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) "failed to get clock for hash\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) ret = clk_prepare_enable(hdev->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) dev_err(dev, "failed to enable hash clock (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) pm_runtime_use_autosuspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) pm_runtime_get_noresume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) pm_runtime_set_active(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (IS_ERR(hdev->rst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) ret = -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) goto err_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) reset_control_assert(hdev->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) udelay(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) reset_control_deassert(hdev->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) hdev->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) platform_set_drvdata(pdev, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) ret = stm32_hash_dma_init(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) case -ENOENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) dev_dbg(dev, "DMA mode not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) goto err_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) spin_lock(&stm32_hash.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) list_add_tail(&hdev->list, &stm32_hash.dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) spin_unlock(&stm32_hash.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) /* Initialize crypto engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) hdev->engine = crypto_engine_alloc_init(dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (!hdev->engine) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) goto err_engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) ret = crypto_engine_start(hdev->engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) goto err_engine_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) /* Register algos */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) ret = stm32_hash_register_algs(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) goto err_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) pm_runtime_put_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) err_algs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) err_engine_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) crypto_engine_exit(hdev->engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) err_engine:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) spin_lock(&stm32_hash.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) list_del(&hdev->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) spin_unlock(&stm32_hash.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) err_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (hdev->dma_lch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) dma_release_channel(hdev->dma_lch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) err_reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) pm_runtime_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) pm_runtime_put_noidle(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) clk_disable_unprepare(hdev->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) static int stm32_hash_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) struct stm32_hash_dev *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) hdev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (!hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) ret = pm_runtime_resume_and_get(hdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) stm32_hash_unregister_algs(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) crypto_engine_exit(hdev->engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) spin_lock(&stm32_hash.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) list_del(&hdev->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) spin_unlock(&stm32_hash.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (hdev->dma_lch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) dma_release_channel(hdev->dma_lch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) pm_runtime_disable(hdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) pm_runtime_put_noidle(hdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) clk_disable_unprepare(hdev->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) static int stm32_hash_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) clk_disable_unprepare(hdev->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static int stm32_hash_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) ret = clk_prepare_enable(hdev->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) dev_err(hdev->dev, "Failed to prepare_enable clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) static const struct dev_pm_ops stm32_hash_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) pm_runtime_force_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) stm32_hash_runtime_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) static struct platform_driver stm32_hash_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) .probe = stm32_hash_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) .remove = stm32_hash_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) .name = "stm32-hash",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) .pm = &stm32_hash_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) .of_match_table = stm32_hash_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) module_platform_driver(stm32_hash_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) MODULE_LICENSE("GPL v2");